
event processing for repl
dengzhhu653 committed Oct 24, 2023
1 parent ab8c109 commit 0900171
Showing 4 changed files with 110 additions and 3 deletions.
@@ -968,6 +968,7 @@ public void accept(NotificationEvent event) {
       switch (event.getEventType()) {
         case MessageBuilder.ADD_PARTITION_EVENT:
         case MessageBuilder.ALTER_PARTITION_EVENT:
+        case MessageBuilder.ALTER_PARTITIONS_EVENT:
         case MessageBuilder.DROP_PARTITION_EVENT:
         case MessageBuilder.ALTER_TABLE_EVENT:
         case MessageBuilder.DROP_TABLE_EVENT:
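The hunk above extends an event filter's fall-through switch so the new bulk ALTER_PARTITIONS_EVENT is accepted everywhere the single-partition and table-level events already were (the enclosing file name is not shown in this capture). A minimal standalone sketch of the same fall-through grouping, using illustrative string literals rather than the real MessageBuilder constant values:

// Hedged sketch, not part of the commit: all table-level event types
// funnel into a single "relevant" branch via case fall-through.
static boolean isTableLevelEvent(String eventType) {
  switch (eventType) {
    case "ADD_PARTITION":
    case "ALTER_PARTITION":
    case "ALTER_PARTITIONS":  // the bulk event type this commit starts accepting
    case "DROP_PARTITION":
    case "ALTER_TABLE":
    case "DROP_TABLE":
      return true;
    default:
      return false;
  }
}

The next hunk adds the new handler class that consumes this event on the dump side.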
@@ -0,0 +1,97 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hive.ql.parse.repl.dump.events;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.messaging.AlterPartitionsMessage;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.EximUtil;
import org.apache.hadoop.hive.ql.parse.repl.DumpType;
import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;

public class AlterPartitionsHandler extends AbstractEventHandler<AlterPartitionsMessage> {
  private final TableName tableName;
  private final List<List<String>> part_vals;
  // isTruncateOp is always false for now
  private final boolean isTruncateOp;

  AlterPartitionsHandler(NotificationEvent event) {
    super(event);
    AlterPartitionsMessage apm = eventMessage;
    tableName = TableName.fromString(event.getCatName(), apm.getDB(), apm.getTable());
    part_vals = apm.getPartitionValues();
    isTruncateOp = apm.getIsTruncateOp();
  }

  @Override
  AlterPartitionsMessage eventMessage(String stringRepresentation) {
    return deserializer.getAlterPartitionsMessage(stringRepresentation);
  }

  @Override
  public void handle(Context withinContext) throws Exception {
    LOG.info("Processing#{} ALTER_PARTITIONS message : {}", fromEventId(), eventMessageAsJSON);

    // We do not dump partitions during a metadata-only bootstrap dump (see TableExport
    // .getPartitions(): for a bootstrap dump we pass a tableSpec with TABLE_ONLY set),
    // so don't dump partition-related events for a metadata-only dump either.
    if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) {
      return;
    }

    Table qlMdTable = withinContext.db.getTable(tableName);
    if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, true,
        withinContext.getTablesForBootstrap(), withinContext.oldReplScope, withinContext.hiveConf)) {
      return;
    }

    if (!isTruncateOp) {
      withinContext.replicationSpec.setIsMetadataOnly(true);
      List<String> partNames = new ArrayList<>(part_vals.size());
      for (List<String> vals : part_vals) {
        partNames.add(Warehouse.makePartName(qlMdTable.getPartCols(), vals));
      }
      List<Partition> partitions = withinContext.db.getPartitionsByNames(qlMdTable, partNames);
      Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME);
      EximUtil.createExportDump(
          metaDataPath.getFileSystem(withinContext.hiveConf),
          metaDataPath,
          qlMdTable,
          partitions,
          withinContext.replicationSpec,
          withinContext.hiveConf);
    }
    DumpMetaData dmd = withinContext.createDmd(this);
    dmd.setPayload(eventMessageAsJSON);
    dmd.write();
  }

  @Override
  public DumpType dumpType() {
    return isTruncateOp ? DumpType.EVENT_TRUNCATE_PARTITION : DumpType.EVENT_ALTER_PARTITION;
  }
}
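handle() rebuilds canonical partition names from the raw value lists before fetching the Partition objects. A small, self-contained sketch of what Warehouse.makePartName returns for one entry of part_vals; the column names and values below are made up for illustration:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class MakePartNameDemo {
  public static void main(String[] args) throws MetaException {
    // Two partition columns, as they would come from Table.getPartCols().
    List<FieldSchema> partCols = Arrays.asList(
        new FieldSchema("ds", "string", null),
        new FieldSchema("country", "string", null));
    // One entry of part_vals: the values of a single altered partition.
    List<String> vals = Arrays.asList("2023-10-24", "us");
    // Prints "ds=2023-10-24/country=us"; special characters are path-escaped.
    System.out.println(Warehouse.makePartName(partCols, vals));
  }
}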
@@ -36,6 +36,7 @@ private EventHandlerFactory() {
     register(MessageBuilder.ADD_PARTITION_EVENT, AddPartitionHandler.class);
     register(MessageBuilder.ALTER_DATABASE_EVENT, AlterDatabaseHandler.class);
     register(MessageBuilder.ALTER_PARTITION_EVENT, AlterPartitionHandler.class);
+    register(MessageBuilder.ALTER_PARTITIONS_EVENT, AlterPartitionsHandler.class);
     register(MessageBuilder.ALTER_TABLE_EVENT, AlterTableHandler.class);
     register(MessageBuilder.CREATE_FUNCTION_EVENT, CreateFunctionHandler.class);
     register(MessageBuilder.CREATE_TABLE_EVENT, CreateTableHandler.class);
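This hunk registers the new handler in EventHandlerFactory's event-type-to-handler map, so the dump path picks AlterPartitionsHandler for bulk alter-partition notifications. A minimal sketch of the register/lookup pattern, under the assumption (not shown in this diff) that handlers are instantiated reflectively from a (NotificationEvent) constructor, as AlterPartitionsHandler's constructor above suggests; the real EventHandlerFactory differs in details:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.NotificationEvent;

// Illustrative sketch only; EventHandler stands in for the package's handler interface.
final class HandlerRegistrySketch {
  private static final Map<String, Class<? extends EventHandler>> REGISTRY = new HashMap<>();

  static void register(String eventType, Class<? extends EventHandler> handlerClazz) {
    REGISTRY.put(eventType, handlerClazz);
  }

  static EventHandler handlerFor(NotificationEvent event) throws Exception {
    Class<? extends EventHandler> clazz = REGISTRY.get(event.getEventType());
    if (clazz == null) {
      throw new IllegalArgumentException("No handler registered for " + event.getEventType());
    }
    // Handlers take the NotificationEvent in their constructor, like AlterPartitionsHandler above.
    return clazz.getDeclaredConstructor(NotificationEvent.class).newInstance(event);
  }
}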
@@ -20,6 +20,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
+import org.apache.hadoop.hive.metastore.messaging.AlterPartitionsMessage;
 import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables;
@@ -113,9 +114,16 @@ private Tuple extract(Context context) throws SemanticException {
         writeId = alterTableMessage.getWriteId();
         break;
       case EVENT_ALTER_PARTITION:
-        AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload());
-        tableType = msg.getTableObj().getTableType();
-        writeId = msg.getWriteId();
+        try {
+          AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload());
+          tableType = msg.getTableObj().getTableType();
+          writeId = msg.getWriteId();
+        } catch (Exception e) {
+          // Not a single-partition payload; fall back to the bulk AlterPartitionsMessage.
+          AlterPartitionsMessage msg = deserializer.getAlterPartitionsMessage(context.dmd.getPayload());
+          tableType = msg.getTableType();
+          writeId = msg.getWriteId();
+        }
         break;
       default:
         break;
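Why the catch-driven fallback: AlterPartitionsHandler.dumpType() above reuses DumpType.EVENT_ALTER_PARTITION for the non-truncate case, so at load time the same dump type can carry either a single-partition or a bulk payload, and the code can only tell them apart by attempting deserialization. A self-contained sketch of the pattern; the "single:"/"bulk:" payload formats and parser names are illustrative stand-ins, not Hive message formats:

public class FallbackParseSketch {

  static long parseSingle(String s) {
    if (!s.startsWith("single:")) {
      throw new IllegalArgumentException("not a single-partition payload: " + s);
    }
    return Long.parseLong(s.substring("single:".length()));
  }

  static long parseBulk(String s) {
    return Long.parseLong(s.substring("bulk:".length()));
  }

  static long writeIdOf(String payload) {
    try {
      return parseSingle(payload);   // common case: single-partition ALTER_PARTITION
    } catch (Exception e) {
      return parseBulk(payload);     // fallback: bulk ALTER_PARTITIONS
    }
  }

  public static void main(String[] args) {
    System.out.println(writeIdOf("single:42"));  // 42, parsed as the single shape
    System.out.println(writeIdOf("bulk:43"));    // 43, recovered via the fallback
  }
}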

