diff --git a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java index 506023b22a7d..1609c846d0e8 100644 --- a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java +++ b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java @@ -71,6 +71,7 @@ import software.amazon.awssdk.services.glue.model.GetPartitionsRequest; import software.amazon.awssdk.services.glue.model.GetPartitionsResponse; import software.amazon.awssdk.services.glue.model.GetTableRequest; +import software.amazon.awssdk.services.glue.model.KeySchemaElement; import software.amazon.awssdk.services.glue.model.PartitionIndex; import software.amazon.awssdk.services.glue.model.PartitionIndexDescriptor; import software.amazon.awssdk.services.glue.model.PartitionInput; @@ -690,7 +691,7 @@ public void managePartitionIndexes(String tableName) throws ExecutionException, GetPartitionIndexesRequest indexesRequest = GetPartitionIndexesRequest.builder().databaseName(databaseName).tableName(tableName).build(); GetPartitionIndexesResponse existingIdxsResp = awsGlue.getPartitionIndexes(indexesRequest).get(); for (PartitionIndexDescriptor idsToDelete : existingIdxsResp.partitionIndexDescriptorList()) { - LOG.warn("Dropping partition index: " + idsToDelete.indexName()); + LOG.warn("Dropping partition index: {}", idsToDelete.indexName()); DeletePartitionIndexRequest idxToDelete = DeletePartitionIndexRequest.builder() .databaseName(databaseName).tableName(tableName).indexName(idsToDelete.indexName()).build(); awsGlue.deletePartitionIndex(idxToDelete).get(); @@ -712,8 +713,8 @@ public void managePartitionIndexes(String tableName) throws ExecutionException, // for each existing index remove if not relevant anymore boolean indexesChanges = false; for (PartitionIndexDescriptor existingIdx: existingIdxsResp.partitionIndexDescriptorList()) { - List idxColumns = existingIdx.keys().stream().map(key -> key.name()).collect(Collectors.toList()); - Boolean toBeRemoved = true; + List idxColumns = existingIdx.keys().stream().map(KeySchemaElement::name).collect(Collectors.toList()); + boolean toBeRemoved = true; for (List neededIdx : partitionsIndexNeeded) { if (neededIdx.equals(idxColumns)) { toBeRemoved = false; diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java index 3b42edc383a5..535ff086e47f 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java @@ -25,9 +25,9 @@ import org.apache.hudi.common.model.HoodieCommitMetadata; import org.apache.hudi.common.model.HoodieWriteStat; import org.apache.hudi.common.table.HoodieTableMetaClient; -import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline; import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.InstantComparator; import org.apache.hudi.common.table.timeline.TimelineUtils; import org.apache.hudi.common.util.NumericUtils; @@ -194,7 +194,7 @@ public String showArchivedCommits( HoodieArchivedTimeline archivedTimeline = HoodieCLI.getTableMetaClient().getArchivedTimeline(); try { archivedTimeline.loadInstantDetailsInMemory(startTs, endTs); - HoodieTimeline timelineRange = 
(HoodieTimeline)archivedTimeline.findInstantsInRange(startTs, endTs); + HoodieTimeline timelineRange = archivedTimeline.findInstantsInRange(startTs, endTs); if (includeExtraMetadata) { return printCommitsWithMetadata(timelineRange, limit, sortByField, descending, headerOnly, exportTableName, partition); } else { diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java index f98ea7cecbb8..b37146b1c818 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java @@ -31,10 +31,10 @@ import org.apache.hudi.client.CompactionAdminClient.ValidationOpResult; import org.apache.hudi.common.model.HoodieTableType; import org.apache.hudi.common.table.HoodieTableMetaClient; -import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline; import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.InstantGenerator; import org.apache.hudi.common.table.timeline.TimelineMetadataUtils; import org.apache.hudi.common.util.Option; @@ -431,7 +431,7 @@ protected static String printCompaction(HoodieCompactionPlan compactionPlan, } private static String getTmpSerializerFile() { - return TMP_DIR + UUID.randomUUID().toString() + ".ser"; + return TMP_DIR + UUID.randomUUID() + ".ser"; } private T deSerializeOperationResult(StoragePath inputPath, diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/CLIUtils.java b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/CLIUtils.java index 524778f8994b..99310aaa3e7a 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/CLIUtils.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/CLIUtils.java @@ -21,10 +21,10 @@ import org.apache.hudi.cli.HoodieCLI; import org.apache.hudi.common.table.HoodieTableMetaClient; -import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.table.timeline.BaseHoodieTimeline; import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline; -import org.apache.hudi.common.table.timeline.BaseHoodieTimeline; +import org.apache.hudi.common.table.timeline.HoodieTimeline; import static org.apache.hudi.cli.utils.CommitUtil.getTimeDaysAgo; import static org.apache.hudi.common.util.StringUtils.isNullOrEmpty; @@ -57,7 +57,7 @@ public static HoodieTimeline getTimelineInRange(String startTs, String endTs, bo if (includeArchivedTimeline) { HoodieArchivedTimeline archivedTimeline = metaClient.getArchivedTimeline(); archivedTimeline.loadInstantDetailsInMemory(startTs, endTs); - return ((HoodieTimeline)archivedTimeline.findInstantsInRange(startTs, endTs)).mergeTimeline(activeTimeline); + return archivedTimeline.findInstantsInRange(startTs, endTs).mergeTimeline(activeTimeline); } return activeTimeline; } diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java index 5209465d8a93..405d4aebb396 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java @@ -32,7 +32,7 @@ public class 
InputStreamConsumer extends Thread { private static final Logger LOG = LoggerFactory.getLogger(InputStreamConsumer.class); - private InputStream is; + private final InputStream is; public InputStreamConsumer(InputStream is) { this.is = is; diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkTempViewProvider.java b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkTempViewProvider.java index 0ce6002c5cb7..c4aa1a327ef8 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkTempViewProvider.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkTempViewProvider.java @@ -40,8 +40,8 @@ public class SparkTempViewProvider implements TempViewProvider { private static final Logger LOG = LoggerFactory.getLogger(SparkTempViewProvider.class); - private JavaSparkContext jsc; - private SQLContext sqlContext; + private final JavaSparkContext jsc; + private final SQLContext sqlContext; public SparkTempViewProvider(String appName) { try { diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncCompactService.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncCompactService.java index 1d1b9421b596..218391eccbcb 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncCompactService.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncCompactService.java @@ -47,7 +47,7 @@ public abstract class AsyncCompactService extends HoodieAsyncTableService { private static final Logger LOG = LoggerFactory.getLogger(AsyncCompactService.class); private final int maxConcurrentCompaction; protected transient HoodieEngineContext context; - private transient BaseCompactor compactor; + private final transient BaseCompactor compactor; public AsyncCompactService(HoodieEngineContext context, BaseHoodieWriteClient client) { this(context, client, false); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/callback/util/HoodieWriteCommitCallbackUtil.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/callback/util/HoodieWriteCommitCallbackUtil.java index fff0b713528b..cd05b78dfcf2 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/callback/util/HoodieWriteCommitCallbackUtil.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/callback/util/HoodieWriteCommitCallbackUtil.java @@ -28,14 +28,14 @@ */ public class HoodieWriteCommitCallbackUtil { - private static ObjectMapper mapper = new ObjectMapper(); + private static final ObjectMapper MAPPER = new ObjectMapper(); /** * Convert data to json string format. 
*/ public static String convertToJsonString(Object obj) { try { - return mapper.writeValueAsString(obj); + return MAPPER.writeValueAsString(obj); } catch (IOException e) { throw new HoodieCommitCallbackException("Callback service convert data to json failed", e); } diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/WriteStatus.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/WriteStatus.java index eac71cba191c..e9ab4c10d56d 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/WriteStatus.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/WriteStatus.java @@ -259,14 +259,12 @@ public boolean isTrackingSuccessfulWrites() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("WriteStatus {"); - sb.append("fileId=").append(fileId); - sb.append(", writeStat=").append(stat); - sb.append(", globalError='").append(globalError).append('\''); - sb.append(", hasErrors='").append(hasErrors()).append('\''); - sb.append(", errorCount='").append(totalErrorRecords).append('\''); - sb.append(", errorPct='").append((100.0 * totalErrorRecords) / totalRecords).append('\''); - sb.append('}'); - return sb.toString(); + return "WriteStatus {" + "fileId=" + fileId + + ", writeStat=" + stat + + ", globalError='" + globalError + '\'' + + ", hasErrors='" + hasErrors() + '\'' + + ", errorCount='" + totalErrorRecords + '\'' + + ", errorPct='" + (100.0 * totalErrorRecords) / totalRecords + '\'' + + '}'; } } diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/FileSystemBasedLockProvider.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/FileSystemBasedLockProvider.java index 8c0bc8842b91..7193e6234e55 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/FileSystemBasedLockProvider.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/FileSystemBasedLockProvider.java @@ -67,8 +67,8 @@ public class FileSystemBasedLockProvider implements LockProvider, Serial private final transient HoodieStorage storage; private final transient StoragePath lockFile; protected LockConfiguration lockConfiguration; - private SimpleDateFormat sdf; - private LockInfo lockInfo; + private final SimpleDateFormat sdf; + private final LockInfo lockInfo; private String currentOwnerLockInfo; public FileSystemBasedLockProvider(final LockConfiguration lockConfiguration, final StorageConfiguration configuration) { diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java index 385532917c49..931d50aeb5c0 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java @@ -524,7 +524,7 @@ public class HoodieIndexConfig extends HoodieConfig { @Deprecated public static final String DEFAULT_SIMPLE_INDEX_UPDATE_PARTITION_PATH = SIMPLE_INDEX_UPDATE_PARTITION_PATH_ENABLE.defaultValue(); - private EngineType engineType; + private final EngineType engineType; /** * Use Spark engine by default. 
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java index c7e14b6b4e1b..b6977d6324ca 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java @@ -835,7 +835,7 @@ public class HoodieWriteConfig extends HoodieConfig { private HoodieStorageConfig storageConfig; private HoodieTimeGeneratorConfig timeGeneratorConfig; private HoodieIndexingConfig indexingConfig; - private EngineType engineType; + private final EngineType engineType; /** * @deprecated Use {@link #TBL_NAME} and its methods instead @@ -2801,7 +2801,7 @@ public static class Builder { private boolean isCleanConfigSet = false; private boolean isArchivalConfigSet = false; private boolean isClusteringConfigSet = false; - private boolean isOptimizeConfigSet = false; + private final boolean isOptimizeConfigSet = false; private boolean isMetricsConfigSet = false; private boolean isBootstrapConfigSet = false; private boolean isMemoryConfigSet = false; diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java index 11ffb785f014..2d6808c81dbe 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java @@ -90,11 +90,9 @@ public int hashCode() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("BloomIndexFileInfo {"); - sb.append(" fileId=").append(fileId); - sb.append(" minRecordKey=").append(minRecordKey); - sb.append(" maxRecordKey=").append(maxRecordKey); - sb.append('}'); - return sb.toString(); + return "BloomIndexFileInfo {" + " fileId=" + fileId + + " minRecordKey=" + minRecordKey + + " maxRecordKey=" + maxRecordKey + + '}'; } } diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java index ef17716637c2..a9989b21440f 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java @@ -24,7 +24,7 @@ import org.slf4j.LoggerFactory; public class DefaultHBaseQPSResourceAllocator implements HBaseIndexQPSResourceAllocator { - private HoodieWriteConfig hoodieWriteConfig; + private final HoodieWriteConfig hoodieWriteConfig; private static final Logger LOG = LoggerFactory.getLogger(DefaultHBaseQPSResourceAllocator.class); public DefaultHBaseQPSResourceAllocator(HoodieWriteConfig hoodieWriteConfig) { diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java index 15a39249d21f..73411a3885ae 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java @@ -91,8 +91,8 @@ public class HoodieMetrics { private String 
conflictResolutionFailureCounterName = null; private String compactionRequestedCounterName = null; private String compactionCompletedCounterName = null; - private HoodieWriteConfig config; - private String tableName; + private final HoodieWriteConfig config; + private final String tableName; private Timer rollbackTimer = null; private Timer cleanTimer = null; private Timer archiveTimer = null; diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadProfile.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadProfile.java index 8e6160b09548..fb831fb49b7b 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadProfile.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadProfile.java @@ -114,12 +114,10 @@ public WriteOperationType getOperationType() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("WorkloadProfile {"); - sb.append("globalStat=").append(globalStat).append(", "); - sb.append("InputPartitionStat=").append(inputPartitionPathStatMap).append(", "); - sb.append("OutputPartitionStat=").append(outputPartitionPathStatMap).append(", "); - sb.append("operationType=").append(operationType); - sb.append('}'); - return sb.toString(); + return "WorkloadProfile {" + "globalStat=" + globalStat + ", " + + "InputPartitionStat=" + inputPartitionPathStatMap + ", " + + "OutputPartitionStat=" + outputPartitionPathStatMap + ", " + + "operationType=" + operationType + + '}'; } } diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadStat.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadStat.java index 327a5a3ae798..716dce473aef 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadStat.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadStat.java @@ -33,9 +33,9 @@ public class WorkloadStat implements Serializable { private long numUpdates = 0L; - private HashMap> insertLocationToCount; + private final HashMap> insertLocationToCount; - private HashMap> updateLocationToCount; + private final HashMap> updateLocationToCount; public WorkloadStat() { insertLocationToCount = new HashMap<>(); @@ -86,10 +86,8 @@ public HashMap> getInsertLocationToCount() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("WorkloadStat {"); - sb.append("numInserts=").append(numInserts).append(", "); - sb.append("numUpdates=").append(numUpdates); - sb.append('}'); - return sb.toString(); + return "WorkloadStat {" + "numInserts=" + numInserts + ", " + + "numUpdates=" + numUpdates + + '}'; } } diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java index 893dfe8548a4..07c622da337d 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java @@ -89,7 +89,7 @@ public class CleanPlanner implements Serializable { private final Map fgIdToPendingLogCompactionOperations; private final HoodieTable hoodieTable; private final HoodieWriteConfig config; - private transient HoodieEngineContext context; + private final transient HoodieEngineContext context; private final List savepointedTimestamps; private Option 
earliestCommitToRetain = Option.empty(); @@ -237,10 +237,7 @@ private boolean isAnySavepointDeleted(HoodieCleanMetadata cleanMetadata) { // check for any savepointed removed in latest compared to previous saved list List removedSavepointedTimestamps = new ArrayList<>(savepointedTimestampsFromLastClean); removedSavepointedTimestamps.removeAll(savepointedTimestamps); - if (removedSavepointedTimestamps.isEmpty()) { - return false; - } - return true; + return !removedSavepointedTimestamps.isEmpty(); } /** diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/cluster/strategy/ClusteringPlanStrategy.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/cluster/strategy/ClusteringPlanStrategy.java index a6894388f6d2..4d1aa91458eb 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/cluster/strategy/ClusteringPlanStrategy.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/cluster/strategy/ClusteringPlanStrategy.java @@ -151,13 +151,16 @@ protected int getPlanVersion() { * Transform {@link FileSlice} to {@link HoodieSliceInfo}. */ protected static List getFileSliceInfo(List slices) { - return slices.stream().map(slice -> new HoodieSliceInfo().newBuilder() - .setPartitionPath(slice.getPartitionPath()) - .setFileId(slice.getFileId()) - .setDataFilePath(slice.getBaseFile().map(BaseFile::getPath).orElse(StringUtils.EMPTY_STRING)) - .setDeltaFilePaths(slice.getLogFiles().map(f -> f.getPath().toString()).collect(Collectors.toList())) - .setBootstrapFilePath(slice.getBaseFile().map(bf -> bf.getBootstrapBaseFile().map(bbf -> bbf.getPath()).orElse(StringUtils.EMPTY_STRING)).orElse(StringUtils.EMPTY_STRING)) - .build()).collect(Collectors.toList()); + return slices.stream().map(slice -> { + new HoodieSliceInfo(); + return HoodieSliceInfo.newBuilder() + .setPartitionPath(slice.getPartitionPath()) + .setFileId(slice.getFileId()) + .setDataFilePath(slice.getBaseFile().map(BaseFile::getPath).orElse(StringUtils.EMPTY_STRING)) + .setDeltaFilePaths(slice.getLogFiles().map(f -> f.getPath().toString()).collect(Collectors.toList())) + .setBootstrapFilePath(slice.getBaseFile().map(bf -> bf.getBootstrapBaseFile().map(bbf -> bbf.getPath()).orElse(StringUtils.EMPTY_STRING)).orElse(StringUtils.EMPTY_STRING)) + .build(); + }).collect(Collectors.toList()); } /** diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java index 6547da642546..a59bd81218ed 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java @@ -50,12 +50,10 @@ public String getPartitionPath() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("BucketInfo {"); - sb.append("bucketType=").append(bucketType).append(", "); - sb.append("fileIdPrefix=").append(fileIdPrefix).append(", "); - sb.append("partitionPath=").append(partitionPath); - sb.append('}'); - return sb.toString(); + return "BucketInfo {" + "bucketType=" + bucketType + ", " + + "fileIdPrefix=" + fileIdPrefix + ", " + + "partitionPath=" + partitionPath + + '}'; } @Override diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/InsertBucket.java 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/InsertBucket.java index 2cedbe865881..8861621ff96e 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/InsertBucket.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/InsertBucket.java @@ -32,10 +32,8 @@ public class InsertBucket implements Serializable { @Override public String toString() { - final StringBuilder sb = new StringBuilder("InsertBucket {"); - sb.append("bucketNumber=").append(bucketNumber).append(", "); - sb.append("weight=").append(weight); - sb.append('}'); - return sb.toString(); + return "InsertBucket {" + "bucketNumber=" + bucketNumber + ", " + + "weight=" + weight + + '}'; } } diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java index e495d28e10bd..ef6bef69dbd0 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java @@ -32,10 +32,8 @@ public class SmallFile implements Serializable { @Override public String toString() { - final StringBuilder sb = new StringBuilder("SmallFile {"); - sb.append("location=").append(location).append(", "); - sb.append("sizeBytes=").append(sizeBytes); - sb.append('}'); - return sb.toString(); + return "SmallFile {" + "location=" + location + ", " + + "sizeBytes=" + sizeBytes + + '}'; } } \ No newline at end of file diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java index 276bec1b9bb1..7ad9df2a58b0 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java @@ -56,7 +56,7 @@ public class RunCompactionActionExecutor extends private final HoodieCompactor compactor; private final HoodieCompactionHandler compactionHandler; - private WriteOperationType operationType; + private final WriteOperationType operationType; private final HoodieMetrics metrics; diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/ScheduleCompactionActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/ScheduleCompactionActionExecutor.java index e83800e45daa..302d9069b235 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/ScheduleCompactionActionExecutor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/ScheduleCompactionActionExecutor.java @@ -53,7 +53,7 @@ public class ScheduleCompactionActionExecutor extends BaseActionExecutor> { private static final Logger LOG = LoggerFactory.getLogger(ScheduleCompactionActionExecutor.class); - private WriteOperationType operationType; + private final WriteOperationType operationType; private final Option> extraMetadata; private BaseHoodieCompactionPlanGenerator planGenerator; diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/strategy/CompositeCompactionStrategy.java 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/strategy/CompositeCompactionStrategy.java index da90269509d5..e682816af853 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/strategy/CompositeCompactionStrategy.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/strategy/CompositeCompactionStrategy.java @@ -32,7 +32,7 @@ */ public class CompositeCompactionStrategy extends CompactionStrategy { - private List strategies; + private final List strategies; public CompositeCompactionStrategy(List strategies) { this.strategies = strategies; diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/DirectWriteMarkers.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/DirectWriteMarkers.java index 86a0c6f0aea7..dcff1f7e0a66 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/DirectWriteMarkers.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/DirectWriteMarkers.java @@ -195,7 +195,7 @@ private Option create(StoragePath markerPath, boolean checkIfExists } catch (IOException e) { throw new HoodieException("Failed to create marker file " + markerPath, e); } - LOG.info("[direct] Created marker file " + markerPath.toString() + LOG.info("[direct] Created marker file " + markerPath + " in " + timer.endTimer() + " ms"); return Option.of(markerPath); } diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java index 92e93cd4ab6a..705db13c6bca 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java @@ -88,7 +88,7 @@ public boolean deleteMarkerDir(HoodieEngineContext context, int parallelism) { return httpRequestClient.executeRequest( DELETE_MARKER_DIR_URL, paramsMap, BOOLEAN_TYPE_REFERENCE, RequestMethod.POST); } catch (IOException e) { - throw new HoodieRemoteException("Failed to delete marker directory " + markerDirPath.toString(), e); + throw new HoodieRemoteException("Failed to delete marker directory " + markerDirPath, e); } } @@ -99,7 +99,7 @@ public boolean doesMarkerDirExist() { return httpRequestClient.executeRequest( MARKERS_DIR_EXISTS_URL, paramsMap, BOOLEAN_TYPE_REFERENCE, RequestMethod.GET); } catch (IOException e) { - throw new HoodieRemoteException("Failed to check marker directory " + markerDirPath.toString(), e); + throw new HoodieRemoteException("Failed to check marker directory " + markerDirPath, e); } } @@ -112,7 +112,7 @@ public Set createdAndMergedDataPaths(HoodieEngineContext context, int pa return markerPaths.stream().map(WriteMarkers::stripMarkerSuffix).collect(Collectors.toSet()); } catch (IOException e) { throw new HoodieRemoteException("Failed to get CREATE and MERGE data file paths in " - + markerDirPath.toString(), e); + + markerDirPath, e); } } @@ -123,7 +123,7 @@ public Set allMarkerFilePaths() { return httpRequestClient.executeRequest( ALL_MARKERS_URL, paramsMap, SET_TYPE_REFERENCE, RequestMethod.GET); } catch (IOException e) { - throw new HoodieRemoteException("Failed to get all markers in " + markerDirPath.toString(), e); + throw new HoodieRemoteException("Failed to get 
all markers in " + markerDirPath, e); } } diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/util/HttpRequestClient.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/util/HttpRequestClient.java index d977f8afa2cf..8f758b935ceb 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/util/HttpRequestClient.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/util/HttpRequestClient.java @@ -92,7 +92,7 @@ public T executeRequest(String requestPath, Map queryParamet break; } String content = response.returnContent().asString(); - return (T) MAPPER.readValue(content, reference); + return MAPPER.readValue(content, reference); } public enum RequestMethod { diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/FlinkTaskContextSupplier.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/FlinkTaskContextSupplier.java index aab248fc3cf1..c945adc688dc 100644 --- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/FlinkTaskContextSupplier.java +++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/FlinkTaskContextSupplier.java @@ -30,7 +30,7 @@ * Flink task context supplier. */ public class FlinkTaskContextSupplier extends TaskContextSupplier { - private RuntimeContext flinkRuntimeContext; + private final RuntimeContext flinkRuntimeContext; public FlinkTaskContextSupplier(RuntimeContext flinkRuntimeContext) { this.flinkRuntimeContext = flinkRuntimeContext; diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/HoodieFlinkWriteClient.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/HoodieFlinkWriteClient.java index ba3570a0b49f..3bf6ae106ee1 100644 --- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/HoodieFlinkWriteClient.java +++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/client/HoodieFlinkWriteClient.java @@ -457,7 +457,7 @@ public void close() { } public HoodieFlinkTable getHoodieTable() { - return HoodieFlinkTable.create(config, (HoodieFlinkEngineContext) context); + return HoodieFlinkTable.create(config, context); } public Map> getPartitionToReplacedFileIds( diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/execution/ExplicitWriteHandler.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/execution/ExplicitWriteHandler.java index 59e1e3c6de41..0b6f04002f42 100644 --- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/execution/ExplicitWriteHandler.java +++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/execution/ExplicitWriteHandler.java @@ -36,7 +36,7 @@ public class ExplicitWriteHandler private final List statuses = new ArrayList<>(); - private HoodieWriteHandle handle; + private final HoodieWriteHandle handle; public ExplicitWriteHandler(HoodieWriteHandle handle) { this.handle = handle; diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/parquet/ParquetSchemaConverter.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/parquet/ParquetSchemaConverter.java index b4b425f383cc..5218526db3d9 100644 --- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/parquet/ParquetSchemaConverter.java +++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/parquet/ParquetSchemaConverter.java @@ -259,7 +259,7 @@ public static TypeInformation convertParquetTypeToTypeInfo(final Type fieldTy 
throw new UnsupportedOperationException( String.format( "List field [%s] in List [%s] has to be required. ", - type.toString(), fieldType.getName())); + type, fieldType.getName())); } } typeInfo = @@ -286,7 +286,7 @@ public static TypeInformation convertParquetTypeToTypeInfo(final Type fieldTy String.format( "Unrecgonized List schema [%s] according to Parquet" + " standard", - parquetGroupType.toString())); + parquetGroupType)); } } } @@ -515,7 +515,7 @@ private static Type convertField( .named(fieldName); } else { throw new UnsupportedOperationException( - "Unsupported SqlTimeTypeInfo " + typeInfo.toString()); + "Unsupported SqlTimeTypeInfo " + typeInfo); } } else { diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/action/commit/FlinkInsertCommitActionExecutor.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/action/commit/FlinkInsertCommitActionExecutor.java index 387aae29769b..fde1c4b7e7b7 100644 --- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/action/commit/FlinkInsertCommitActionExecutor.java +++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/action/commit/FlinkInsertCommitActionExecutor.java @@ -34,7 +34,7 @@ */ public class FlinkInsertCommitActionExecutor extends BaseFlinkCommitActionExecutor { - private List> inputRecords; + private final List> inputRecords; public FlinkInsertCommitActionExecutor(HoodieEngineContext context, HoodieWriteHandle writeHandle, diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/action/commit/FlinkUpsertCommitActionExecutor.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/action/commit/FlinkUpsertCommitActionExecutor.java index 7194593e2a6d..ceedacefd12c 100644 --- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/action/commit/FlinkUpsertCommitActionExecutor.java +++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/action/commit/FlinkUpsertCommitActionExecutor.java @@ -34,7 +34,7 @@ */ public class FlinkUpsertCommitActionExecutor extends BaseFlinkCommitActionExecutor { - private List> inputRecords; + private final List> inputRecords; public FlinkUpsertCommitActionExecutor(HoodieEngineContext context, HoodieWriteHandle writeHandle, diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/upgrade/FlinkUpgradeDowngradeHelper.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/upgrade/FlinkUpgradeDowngradeHelper.java index a57857424955..dc4ab6dc4f92 100644 --- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/upgrade/FlinkUpgradeDowngradeHelper.java +++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/table/upgrade/FlinkUpgradeDowngradeHelper.java @@ -21,7 +21,6 @@ import org.apache.hudi.client.BaseHoodieWriteClient; import org.apache.hudi.client.HoodieFlinkWriteClient; -import org.apache.hudi.client.common.HoodieFlinkEngineContext; import org.apache.hudi.common.engine.HoodieEngineContext; import org.apache.hudi.config.HoodieWriteConfig; import org.apache.hudi.keygen.constant.KeyGeneratorOptions; @@ -45,7 +44,7 @@ public static FlinkUpgradeDowngradeHelper getInstance() { @Override public HoodieTable getTable(HoodieWriteConfig config, HoodieEngineContext context) { - return HoodieFlinkTable.create(config, (HoodieFlinkEngineContext) context); + return HoodieFlinkTable.create(config, context); } @Override diff --git 
a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/client/HoodieJavaWriteClient.java b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/client/HoodieJavaWriteClient.java index 4742c25c557d..23deb05181f4 100644 --- a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/client/HoodieJavaWriteClient.java +++ b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/client/HoodieJavaWriteClient.java @@ -18,7 +18,6 @@ package org.apache.hudi.client; -import org.apache.hudi.client.common.HoodieJavaEngineContext; import org.apache.hudi.client.embedded.EmbeddedTimelineService; import org.apache.hudi.common.data.HoodieListData; import org.apache.hudi.common.engine.HoodieEngineContext; @@ -68,7 +67,7 @@ public HoodieJavaWriteClient(HoodieEngineContext context, @Override public List> filterExists(List> hoodieRecords) { // Create a Hoodie table which encapsulated the commits and files visible - HoodieJavaTable table = HoodieJavaTable.create(config, (HoodieJavaEngineContext) context); + HoodieJavaTable table = HoodieJavaTable.create(config, context); Timer.Context indexTimer = metrics.getIndexCtx(); List> recordsWithLocation = getIndex().tagLocation(HoodieListData.eager(hoodieRecords), context, table).collectAsList(); metrics.updateIndexMetrics(LOOKUP_STR, metrics.getDurationInMs(indexTimer == null ? 0L : indexTimer.stop())); diff --git a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/execution/bulkinsert/JavaGlobalSortPartitioner.java b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/execution/bulkinsert/JavaGlobalSortPartitioner.java index 5317914a9cde..6913634846ca 100644 --- a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/execution/bulkinsert/JavaGlobalSortPartitioner.java +++ b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/execution/bulkinsert/JavaGlobalSortPartitioner.java @@ -43,16 +43,10 @@ public List> repartitionRecords(List> records, public int compare(Object o1, Object o2) { HoodieRecord o11 = (HoodieRecord) o1; HoodieRecord o22 = (HoodieRecord) o2; - String left = new StringBuilder() - .append(o11.getPartitionPath()) - .append("+") - .append(o11.getRecordKey()) - .toString(); - String right = new StringBuilder() - .append(o22.getPartitionPath()) - .append("+") - .append(o22.getRecordKey()) - .toString(); + String left = o11.getPartitionPath() + + "+" + o11.getRecordKey(); + String right = o22.getPartitionPath() + + "+" + o22.getRecordKey(); return left.compareTo(right); } }); diff --git a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaInsertCommitActionExecutor.java b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaInsertCommitActionExecutor.java index ec4f987df66c..73618e0848c8 100644 --- a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaInsertCommitActionExecutor.java +++ b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaInsertCommitActionExecutor.java @@ -30,7 +30,7 @@ public class JavaInsertCommitActionExecutor extends BaseJavaCommitActionExecutor { - private List> inputRecords; + private final List> inputRecords; public JavaInsertCommitActionExecutor(HoodieEngineContext context, HoodieWriteConfig config, diff --git a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaUpsertCommitActionExecutor.java b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaUpsertCommitActionExecutor.java index 
34ec4e792c71..adacfa2f46c3 100644 --- a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaUpsertCommitActionExecutor.java +++ b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaUpsertCommitActionExecutor.java @@ -30,7 +30,7 @@ public class JavaUpsertCommitActionExecutor extends BaseJavaCommitActionExecutor { - private List> inputRecords; + private final List> inputRecords; public JavaUpsertCommitActionExecutor(HoodieEngineContext context, HoodieWriteConfig config, diff --git a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaUpsertPartitioner.java b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaUpsertPartitioner.java index 04b552747448..1dfde9a8aa7e 100644 --- a/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaUpsertPartitioner.java +++ b/hudi-client/hudi-java-client/src/main/java/org/apache/hudi/table/action/commit/JavaUpsertPartitioner.java @@ -67,19 +67,19 @@ public class JavaUpsertPartitioner implements Partitioner { /** * Stat for the input and output workload. Describe the workload before and after being assigned buckets. */ - private WorkloadProfile workloadProfile; + private final WorkloadProfile workloadProfile; /** * Helps decide which bucket an incoming update should go to. */ - private HashMap updateLocationToBucket; + private final HashMap updateLocationToBucket; /** * Helps us pack inserts into 1 or more buckets depending on number of incoming records. */ - private HashMap> partitionPathToInsertBucketInfos; + private final HashMap> partitionPathToInsertBucketInfos; /** * Remembers what type each bucket is for later. */ - private HashMap bucketInfoMap; + private final HashMap bucketInfoMap; protected final HoodieTable table; diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/HoodieSparkCompactor.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/HoodieSparkCompactor.java index a7017c726ae1..85d93c8aced7 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/HoodieSparkCompactor.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/HoodieSparkCompactor.java @@ -35,7 +35,7 @@ public class HoodieSparkCompactor extends BaseCompactor>, JavaRDD, JavaRDD> { private static final Logger LOG = LoggerFactory.getLogger(HoodieSparkCompactor.class); - private transient HoodieEngineContext context; + private final transient HoodieEngineContext context; public HoodieSparkCompactor(BaseHoodieWriteClient>, JavaRDD, JavaRDD> compactionClient, HoodieEngineContext context) { diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/SparkRDDReadClient.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/SparkRDDReadClient.java index 7c07468c43c8..0cc902e43b3d 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/SparkRDDReadClient.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/SparkRDDReadClient.java @@ -67,7 +67,7 @@ public class SparkRDDReadClient implements Serializable { * base path pointing to the table. 
Until, then just always assume a BloomIndex */ private final transient HoodieIndex index; - private HoodieTable hoodieTable; + private final HoodieTable hoodieTable; private transient Option sqlContextOpt; private final transient HoodieSparkEngineContext context; private final transient StorageConfiguration storageConf; diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/validator/SparkPreCommitValidator.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/validator/SparkPreCommitValidator.java index 95055e582bc2..5ea6ea375ea2 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/validator/SparkPreCommitValidator.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/validator/SparkPreCommitValidator.java @@ -50,10 +50,10 @@ public abstract class SparkPreCommitValidator> { private static final Logger LOG = LoggerFactory.getLogger(SparkPreCommitValidator.class); - private HoodieSparkTable table; - private HoodieEngineContext engineContext; - private HoodieWriteConfig writeConfig; - private HoodieMetrics metrics; + private final HoodieSparkTable table; + private final HoodieEngineContext engineContext; + private final HoodieWriteConfig writeConfig; + private final HoodieMetrics metrics; protected SparkPreCommitValidator(HoodieSparkTable table, HoodieEngineContext engineContext, HoodieWriteConfig writeConfig) { this.table = table; diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/BulkInsertMapFunction.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/BulkInsertMapFunction.java index c54d579224a9..1efd9b3affda 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/BulkInsertMapFunction.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/BulkInsertMapFunction.java @@ -37,13 +37,13 @@ public class BulkInsertMapFunction implements Function2>, Iterator>> { - private String instantTime; - private boolean areRecordsSorted; - private HoodieWriteConfig config; - private HoodieTable hoodieTable; - private boolean useWriterSchema; - private BulkInsertPartitioner partitioner; - private WriteHandleFactory writeHandleFactory; + private final String instantTime; + private final boolean areRecordsSorted; + private final HoodieWriteConfig config; + private final HoodieTable hoodieTable; + private final boolean useWriterSchema; + private final BulkInsertPartitioner partitioner; + private final WriteHandleFactory writeHandleFactory; public BulkInsertMapFunction(String instantTime, boolean areRecordsSorted, HoodieWriteConfig config, HoodieTable hoodieTable, diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/GlobalSortPartitioner.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/GlobalSortPartitioner.java index 308ee1992e30..285253f8fc47 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/GlobalSortPartitioner.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/GlobalSortPartitioner.java @@ -55,11 +55,7 @@ public JavaRDD> repartitionRecords(JavaRDD> reco // Let's use "partitionPath + key" as the sort key. 
Spark, will ensure // the records split evenly across RDD partitions, such that small partitions fit // into 1 RDD partition, while big ones spread evenly across multiple RDD partitions - return new StringBuilder() - .append(record.getPartitionPath()) - .append("+") - .append(record.getRecordKey()) - .toString(); + return record.getPartitionPath() + "+" + record.getRecordKey(); }, true, outputSparkPartitions); } diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/RDDPartitionSortPartitioner.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/RDDPartitionSortPartitioner.java index 0f8132207048..ae3aa7d34c80 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/RDDPartitionSortPartitioner.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/execution/bulkinsert/RDDPartitionSortPartitioner.java @@ -59,15 +59,12 @@ public JavaRDD> repartitionRecords(JavaRDD> reco return records.coalesce(outputSparkPartitions) .mapToPair(record -> new Tuple2<>( - new StringBuilder() - .append(record.getPartitionPath()) - .append("+") - .append(record.getRecordKey()) - .toString(), record)) + record.getPartitionPath() + "+" + + record.getRecordKey(), record)) .mapPartitions(partition -> { // Sort locally in partition List>> recordList = new ArrayList<>(); - for (; partition.hasNext(); ) { + while (partition.hasNext()) { recordList.add(partition.next()); } Collections.sort(recordList, (o1, o2) -> o1._1.compareTo(o2._1)); diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java index 17a0b24ebf8a..d2eb15196831 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/bloom/BucketizedBloomCheckPartitioner.java @@ -59,12 +59,12 @@ public class BucketizedBloomCheckPartitioner extends Partitioner { private static final Logger LOG = LoggerFactory.getLogger(BucketizedBloomCheckPartitioner.class); - private int partitions; + private final int partitions; /** * Stores the final mapping of a file group to a list of partitions for its keys. */ - private Map> fileGroupToPartitions; + private final Map> fileGroupToPartitions; /** * Create a partitioner that computes a plan based on provided workload characteristics. 
@@ -147,7 +147,7 @@ public int getPartition(Object key) { // TODO replace w/ more performant hash final long hashOfKey = NumericUtils.getMessageDigestHash("MD5", parts.getRight()); final List candidatePartitions = fileGroupToPartitions.get(parts.getLeft()); - final int idx = (int) Math.floorMod((int) hashOfKey, candidatePartitions.size()); + final int idx = Math.floorMod((int) hashOfKey, candidatePartitions.size()); assert idx >= 0; return candidatePartitions.get(idx); } diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/hbase/SparkHoodieHBaseIndex.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/hbase/SparkHoodieHBaseIndex.java index 3662e5da880f..b6be1df61f1d 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/hbase/SparkHoodieHBaseIndex.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/hbase/SparkHoodieHBaseIndex.java @@ -114,12 +114,13 @@ public class SparkHoodieHBaseIndex extends HoodieIndex { private static final byte[] PARTITION_PATH_COLUMN = getUTF8Bytes("partition_path"); private static final Logger LOG = LoggerFactory.getLogger(SparkHoodieHBaseIndex.class); - private static Connection hbaseConnection = null; + private static Connection HBASE_CONNECTION = null; + private static transient Thread SHUTDOWN_THREAD; + private HBaseIndexQPSResourceAllocator hBaseIndexQPSResourceAllocator = null; private int maxQpsPerRegionServer; private long totalNumInserts; private int numWriteStatusWithInserts; - private static transient Thread shutdownThread; /** * multiPutBatchSize will be computed and re-set in updateLocation if @@ -197,15 +198,15 @@ private Connection getHBaseConnection() { * exits. */ private void addShutDownHook() { - if (null == shutdownThread) { - shutdownThread = new Thread(() -> { + if (null == SHUTDOWN_THREAD) { + SHUTDOWN_THREAD = new Thread(() -> { try { - hbaseConnection.close(); + HBASE_CONNECTION.close(); } catch (Exception e) { // fail silently for any sort of exception } }); - Runtime.getRuntime().addShutdownHook(shutdownThread); + Runtime.getRuntime().addShutdownHook(SHUTDOWN_THREAD); } } @@ -244,12 +245,12 @@ private Function2>, Iterator> taggedRecords = new ArrayList<>(); - try (HTable hTable = (HTable) hbaseConnection.getTable(TableName.valueOf(tableName))) { + try (HTable hTable = (HTable) HBASE_CONNECTION.getTable(TableName.valueOf(tableName))) { List statements = new ArrayList<>(); List currentBatchOfRecords = new LinkedList<>(); // Do the tagging. 
@@ -340,15 +341,15 @@ private Function2, Iterator> updateL List writeStatusList = new ArrayList<>(); // Grab the global HBase connection synchronized (SparkHoodieHBaseIndex.class) { - if (hbaseConnection == null || hbaseConnection.isClosed()) { - hbaseConnection = getHBaseConnection(); + if (HBASE_CONNECTION == null || HBASE_CONNECTION.isClosed()) { + HBASE_CONNECTION = getHBaseConnection(); } } final long startTimeForPutsTask = DateTime.now().getMillis(); LOG.info("startTimeForPutsTask for this task: " + startTimeForPutsTask); final RateLimiter limiter = RateLimiter.create(multiPutBatchSize, TimeUnit.SECONDS); - try (BufferedMutator mutator = hbaseConnection.getBufferedMutator(TableName.valueOf(tableName))) { + try (BufferedMutator mutator = HBASE_CONNECTION.getBufferedMutator(TableName.valueOf(tableName))) { while (statusIterator.hasNext()) { WriteStatus writeStatus = statusIterator.next(); List mutations = new ArrayList<>(); @@ -596,13 +597,13 @@ public boolean rollbackCommit(String instantTime) { } synchronized (SparkHoodieHBaseIndex.class) { - if (hbaseConnection == null || hbaseConnection.isClosed()) { - hbaseConnection = getHBaseConnection(); + if (HBASE_CONNECTION == null || HBASE_CONNECTION.isClosed()) { + HBASE_CONNECTION = getHBaseConnection(); } } final RateLimiter limiter = RateLimiter.create(multiPutBatchSize, TimeUnit.SECONDS); - try (HTable hTable = (HTable) hbaseConnection.getTable(TableName.valueOf(tableName)); - BufferedMutator mutator = hbaseConnection.getBufferedMutator(TableName.valueOf(tableName))) { + try (HTable hTable = (HTable) HBASE_CONNECTION.getTable(TableName.valueOf(tableName)); + BufferedMutator mutator = HBASE_CONNECTION.getBufferedMutator(TableName.valueOf(tableName))) { Long rollbackTime = TimelineUtils.parseDateFromInstantTime(instantTime).getTime(); Long currentTime = new Date().getTime(); Scan scan = new Scan(); @@ -682,7 +683,7 @@ public boolean isImplicitWithStorage() { } public void setHbaseConnection(Connection hbaseConnection) { - SparkHoodieHBaseIndex.hbaseConnection = hbaseConnection; + SparkHoodieHBaseIndex.HBASE_CONNECTION = hbaseConnection; } /** @@ -691,7 +692,7 @@ public void setHbaseConnection(Connection hbaseConnection) { * that are based on inserts in each WriteStatus. 
*/ public static class WriteStatusPartitioner extends Partitioner { - private int totalPartitions; + private final int totalPartitions; final Map fileIdPartitionMap; public WriteStatusPartitioner(final Map fileIdPartitionMap, final int totalPartitions) { diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetReader.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetReader.java index 7710170fd48a..fda9a3871f05 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetReader.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetReader.java @@ -39,7 +39,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.parquet.hadoop.ParquetReader; -import org.apache.parquet.hadoop.api.ReadSupport; import org.apache.parquet.schema.MessageType; import org.apache.spark.sql.HoodieInternalRowUtils; import org.apache.spark.sql.catalyst.InternalRow; @@ -64,7 +63,7 @@ public class HoodieSparkParquetReader implements HoodieSparkFileReader { private final StoragePath path; private final HoodieStorage storage; private final FileFormatUtils parquetUtils; - private List readerIterators = new ArrayList<>(); + private final List readerIterators = new ArrayList<>(); private Option structTypeOption = Option.empty(); private Option schemaOption = Option.empty(); @@ -124,7 +123,7 @@ public ClosableIterator getUnsafeRowIterator(StructType requestedSche storage.getConf().set(ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA(), readSchemaJson); storage.getConf().set(SQLConf.PARQUET_BINARY_AS_STRING().key(), SQLConf.get().getConf(SQLConf.PARQUET_BINARY_AS_STRING()).toString()); storage.getConf().set(SQLConf.PARQUET_INT96_AS_TIMESTAMP().key(), SQLConf.get().getConf(SQLConf.PARQUET_INT96_AS_TIMESTAMP()).toString()); - ParquetReader reader = ParquetReader.builder((ReadSupport) new ParquetReadSupport(), new Path(path.toUri())) + ParquetReader reader = ParquetReader.builder(new ParquetReadSupport(), new Path(path.toUri())) .withConf(storage.getConf().unwrapAs(Configuration.class)) .build(); UnsafeProjection projection = evolution.generateUnsafeProjection(); diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metrics/DistributedRegistry.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metrics/DistributedRegistry.java index 673211cfa0ae..6cd7e8a274ef 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metrics/DistributedRegistry.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/metrics/DistributedRegistry.java @@ -33,7 +33,7 @@ */ public class DistributedRegistry extends AccumulatorV2, Map> implements Registry, Serializable { - private String name; + private final String name; ConcurrentHashMap counters = new ConcurrentHashMap<>(); public DistributedRegistry(String name) { diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/sort/SpaceCurveSortingHelper.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/sort/SpaceCurveSortingHelper.java index eb35d0cae372..f616e5080edb 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/sort/SpaceCurveSortingHelper.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/sort/SpaceCurveSortingHelper.java @@ -56,6 +56,7 @@ import javax.annotation.Nonnull; import java.util.Arrays; +import java.util.Collections; import 
java.util.Iterator; import java.util.List; import java.util.Map; @@ -143,7 +144,7 @@ private static StructType composeOrderedRDDStructType(StructType schema) { return StructType$.MODULE$.apply( CollectionUtils.combine( Arrays.asList(schema.fields()), - Arrays.asList(new StructField("Index", BinaryType$.MODULE$, true, Metadata.empty())) + Collections.singletonList(new StructField("Index", BinaryType$.MODULE$, true, Metadata.empty())) ) ); } @@ -151,12 +152,13 @@ private static StructType composeOrderedRDDStructType(StructType schema) { private static JavaRDD createZCurveSortedRDD(JavaRDD originRDD, Map fieldMap, int fieldNum, int fileNum) { return originRDD.map(row -> { byte[][] zBytes = fieldMap.entrySet().stream() - .map(entry -> { - int index = entry.getKey(); - StructField field = entry.getValue(); - return mapColumnValueTo8Bytes(row, index, field.dataType()); - }) - .toArray(byte[][]::new); + .map(entry -> { + int index = entry.getKey(); + + StructField field = entry.getValue(); + return mapColumnValueTo8Bytes(row, index, field.dataType()); + }) + .toArray(byte[][]::new); // Interleave received bytes to produce Z-curve ordinal byte[] zOrdinalBytes = BinaryUtil.interleaving(zBytes, 8); @@ -225,7 +227,7 @@ private static byte[] mapColumnValueTo8Bytes(Row row, int index, DataType dataTy } else if (dataType instanceof DecimalType) { return BinaryUtil.longTo8Byte(row.isNullAt(index) ? Long.MAX_VALUE : row.getDecimal(index).longValue()); } else if (dataType instanceof BooleanType) { - boolean value = row.isNullAt(index) ? false : row.getBoolean(index); + boolean value = !row.isNullAt(index) && row.getBoolean(index); return BinaryUtil.intTo8Byte(value ? 1 : 0); } else if (dataType instanceof BinaryType) { return BinaryUtil.paddingTo8Byte(row.isNullAt(index) ? new byte[] {0} : (byte[]) row.get(index)); @@ -242,7 +244,7 @@ private static long mapColumnValueToLong(Row row, int index, DataType dataType) } else if (dataType instanceof IntegerType) { return row.isNullAt(index) ? Long.MAX_VALUE : (long) row.getInt(index); } else if (dataType instanceof FloatType) { - return row.isNullAt(index) ? Long.MAX_VALUE : Double.doubleToLongBits((double) row.getFloat(index)); + return row.isNullAt(index) ? Long.MAX_VALUE : Double.doubleToLongBits(row.getFloat(index)); } else if (dataType instanceof StringType) { return row.isNullAt(index) ? Long.MAX_VALUE : BinaryUtil.convertStringToLong(row.getString(index)); } else if (dataType instanceof DateType) { @@ -256,7 +258,7 @@ private static long mapColumnValueToLong(Row row, int index, DataType dataType) } else if (dataType instanceof DecimalType) { return row.isNullAt(index) ? Long.MAX_VALUE : row.getDecimal(index).longValue(); } else if (dataType instanceof BooleanType) { - boolean value = row.isNullAt(index) ? false : row.getBoolean(index); + boolean value = !row.isNullAt(index) && row.getBoolean(index); return value ? Long.MAX_VALUE : 0; } else if (dataType instanceof BinaryType) { return row.isNullAt(index) ? 
Long.MAX_VALUE : BinaryUtil.convertBytesToLong((byte[]) row.get(index)); diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkDeletePartitionCommitActionExecutor.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkDeletePartitionCommitActionExecutor.java index 0869cb6729d1..02a070ae65b9 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkDeletePartitionCommitActionExecutor.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/SparkDeletePartitionCommitActionExecutor.java @@ -49,7 +49,7 @@ public class SparkDeletePartitionCommitActionExecutor extends SparkInsertOverwriteCommitActionExecutor { - private List partitions; + private final List partitions; public SparkDeletePartitionCommitActionExecutor(HoodieEngineContext context, HoodieWriteConfig config, HoodieTable table, String instantTime, List partitions) { diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/UpsertPartitioner.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/UpsertPartitioner.java index 146a01d08868..f8a299e73c33 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/UpsertPartitioner.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/commit/UpsertPartitioner.java @@ -73,15 +73,15 @@ public class UpsertPartitioner extends SparkHoodiePartitioner { /** * Helps decide which bucket an incoming update should go to. */ - private HashMap updateLocationToBucket; + private final HashMap updateLocationToBucket; /** * Helps us pack inserts into 1 or more buckets depending on number of incoming records. */ - private HashMap> partitionPathToInsertBucketInfos; + private final HashMap> partitionPathToInsertBucketInfos; /** * Remembers what type each bucket is for later. */ - private HashMap bucketInfoMap; + private final HashMap bucketInfoMap; protected final HoodieWriteConfig config; private final WriteOperationType operationType; diff --git a/hudi-common/src/main/java/org/apache/hudi/avro/AvroSchemaCompatibility.java b/hudi-common/src/main/java/org/apache/hudi/avro/AvroSchemaCompatibility.java index b7bf072b218b..27e326fd2c5f 100644 --- a/hudi-common/src/main/java/org/apache/hudi/avro/AvroSchemaCompatibility.java +++ b/hudi-common/src/main/java/org/apache/hudi/avro/AvroSchemaCompatibility.java @@ -588,12 +588,12 @@ public enum SchemaCompatibilityType { /** * Used internally to tag a reader/writer schema pair and prevent recursion. 
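The boolean rewrites above, turning row.isNullAt(index) ? false : row.getBoolean(index) into !row.isNullAt(index) && row.getBoolean(index), are behavior preserving. A tiny standalone check over all four input combinations:

// Demonstrates that `cond ? false : value` and `!cond && value` are equivalent.
public class NullSafeBooleanCheck {
  public static void main(String[] args) {
    for (boolean isNull : new boolean[] {false, true}) {
      for (boolean value : new boolean[] {false, true}) {
        boolean ternaryForm = isNull ? false : value;
        boolean conjunctionForm = !isNull && value;
        // Prints equal=true for all four combinations.
        System.out.printf("isNull=%s value=%s equal=%s%n",
            isNull, value, ternaryForm == conjunctionForm);
      }
    }
  }
}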
*/ - RECURSION_IN_PROGRESS; + RECURSION_IN_PROGRESS } public enum SchemaIncompatibilityType { NAME_MISMATCH, FIXED_SIZE_MISMATCH, MISSING_ENUM_SYMBOLS, READER_FIELD_MISSING_DEFAULT_VALUE, TYPE_MISMATCH, - MISSING_UNION_BRANCH; + MISSING_UNION_BRANCH } /** diff --git a/hudi-common/src/main/java/org/apache/hudi/avro/HoodieAvroUtils.java b/hudi-common/src/main/java/org/apache/hudi/avro/HoodieAvroUtils.java index 7e67e41581ec..59c18c00936d 100644 --- a/hudi-common/src/main/java/org/apache/hudi/avro/HoodieAvroUtils.java +++ b/hudi-common/src/main/java/org/apache/hudi/avro/HoodieAvroUtils.java @@ -1105,7 +1105,7 @@ private static Object rewritePrimaryTypeWithDiffSchemaType(Object oldValue, Sche return String.valueOf(oldValue); } if (oldSchema.getType() == Schema.Type.BYTES) { - return String.valueOf(((ByteBuffer) oldValue)); + return String.valueOf(oldValue); } if (oldSchema.getLogicalType() == LogicalTypes.date()) { return toJavaDate((Integer) oldValue).toString(); diff --git a/hudi-common/src/main/java/org/apache/hudi/avro/processors/Parser.java b/hudi-common/src/main/java/org/apache/hudi/avro/processors/Parser.java index 4a6e4ee9fb92..286516a28cb9 100644 --- a/hudi-common/src/main/java/org/apache/hudi/avro/processors/Parser.java +++ b/hudi-common/src/main/java/org/apache/hudi/avro/processors/Parser.java @@ -46,7 +46,7 @@ public Pair handleStringValue(String value) { public static class DateParser extends Parser { - private static long MILLI_SECONDS_PER_DAY = 86400000; + private static final long MILLI_SECONDS_PER_DAY = 86400000; @Override public Pair handleNumberValue(Number value) { diff --git a/hudi-common/src/main/java/org/apache/hudi/common/HoodieJsonPayload.java b/hudi-common/src/main/java/org/apache/hudi/common/HoodieJsonPayload.java index f2158a1c9e8a..8c134f2f4cfc 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/HoodieJsonPayload.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/HoodieJsonPayload.java @@ -96,7 +96,7 @@ private String unCompressData(byte[] data) throws IOException { private String getFieldFromJsonOrFail(String field) throws IOException { JsonNode node = new ObjectMapper().readTree(getJsonData()); if (!node.has(field)) { - throw new HoodieException("Field :" + field + " not found in payload => " + node.toString()); + throw new HoodieException("Field :" + field + " not found in payload => " + node); } return node.get(field).textValue(); } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/bloom/HashFunction.java b/hudi-common/src/main/java/org/apache/hudi/common/bloom/HashFunction.java index c6e6deb87273..a3531720be77 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/bloom/HashFunction.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/bloom/HashFunction.java @@ -64,17 +64,17 @@ public final class HashFunction { /** * The number of hashed values. */ - private int nbHash; + private final int nbHash; /** * The maximum highest returned value. */ - private int maxValue; + private final int maxValue; /** * Hashing algorithm to use. */ - private Hash hashFunction; + private final Hash hashFunction; /** * Constructor. 
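Several of the hunks above only add final: true constants become static final (for example the parser's milliseconds-per-day value) and configuration fields are assigned exactly once in the constructor (the hash function's settings). A generic sketch of the intent, with hypothetical names rather than the Hudi classes:

// Hypothetical example: constants are static final, configuration fields are final
// and assigned once in the constructor, so the instance is effectively immutable.
public class WindowedHasher {

  // A true compile-time constant: static, final, upper-case name.
  private static final long MILLIS_PER_DAY = 86_400_000L;

  private final int hashCount;  // number of hashes to apply
  private final int maxValue;   // upper bound of returned values, must be positive

  public WindowedHasher(int hashCount, int maxValue) {
    this.hashCount = hashCount;
    this.maxValue = maxValue;
  }

  public int bucketForDay(long epochMillis) {
    long day = epochMillis / MILLIS_PER_DAY;
    // Uses the constant; no instance state is mutated after construction.
    return (int) Math.floorMod(day * hashCount, (long) maxValue);
  }
}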
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/bootstrap/FileStatusUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/bootstrap/FileStatusUtils.java index 5593b2f7f53b..94df94fe9ebf 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/bootstrap/FileStatusUtils.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/bootstrap/FileStatusUtils.java @@ -33,7 +33,7 @@ public static StoragePathInfo toStoragePathInfo(HoodieFileStatus fileStatus) { return new StoragePathInfo( new StoragePath(fileStatus.getPath().getUri()), fileStatus.getLength(), - fileStatus.getIsDir() == null ? false : fileStatus.getIsDir(), + fileStatus.getIsDir() != null && fileStatus.getIsDir(), fileStatus.getBlockReplication().shortValue(), fileStatus.getBlockSize(), fileStatus.getModificationTime()); } } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieStorageConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieStorageConfig.java index fcf3bb033d84..cfbe62d74ffc 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieStorageConfig.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieStorageConfig.java @@ -51,7 +51,7 @@ public class HoodieStorageConfig extends HoodieConfig { public static final ConfigProperty PARQUET_PAGE_SIZE = ConfigProperty .key("hoodie.parquet.page.size") - .defaultValue(String.valueOf(1 * 1024 * 1024)) + .defaultValue(String.valueOf(1024 * 1024)) .markAdvanced() .withDocumentation("Parquet page size in bytes. Page is the unit of read within a parquet file. " + "Within a block, pages are compressed separately."); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/engine/HoodieEngineContext.java b/hudi-common/src/main/java/org/apache/hudi/common/engine/HoodieEngineContext.java index 8f5e7ebaa222..269cb1566131 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/engine/HoodieEngineContext.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/engine/HoodieEngineContext.java @@ -46,7 +46,7 @@ public abstract class HoodieEngineContext { /** * A wrapped hadoop configuration which can be serialized. */ - private StorageConfiguration storageConf; + private final StorageConfiguration storageConf; protected TaskContextSupplier taskContextSupplier; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/fs/ConsistencyGuardConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/fs/ConsistencyGuardConfig.java index 58fd2b5feebd..32714132ad3d 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/fs/ConsistencyGuardConfig.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/fs/ConsistencyGuardConfig.java @@ -193,7 +193,7 @@ public ConsistencyGuardConfig build() { * @deprecated use {@link #INITIAL_CHECK_INTERVAL_MS} and its methods. */ @Deprecated - private static long DEFAULT_INITIAL_CONSISTENCY_CHECK_INTERVAL_MS = INITIAL_CHECK_INTERVAL_MS.defaultValue(); + private static final long DEFAULT_INITIAL_CONSISTENCY_CHECK_INTERVAL_MS = INITIAL_CHECK_INTERVAL_MS.defaultValue(); /** * @deprecated use {@link #MAX_CHECK_INTERVAL_MS} and its methods. */ @@ -203,7 +203,7 @@ public ConsistencyGuardConfig build() { * @deprecated use {@link #MAX_CHECK_INTERVAL_MS} and its methods. 
*/ @Deprecated - private static long DEFAULT_MAX_CONSISTENCY_CHECK_INTERVAL_MS = MAX_CHECK_INTERVAL_MS.defaultValue(); + private static final long DEFAULT_MAX_CONSISTENCY_CHECK_INTERVAL_MS = MAX_CHECK_INTERVAL_MS.defaultValue(); /** * @deprecated use {@link #MAX_CHECKS} and its methods. */ @@ -213,7 +213,7 @@ public ConsistencyGuardConfig build() { * @deprecated use {@link #MAX_CHECKS} and its methods. */ @Deprecated - private static int DEFAULT_MAX_CONSISTENCY_CHECKS = MAX_CHECKS.defaultValue(); + private static final int DEFAULT_MAX_CONSISTENCY_CHECKS = MAX_CHECKS.defaultValue(); /** * @deprecated use {@link #OPTIMISTIC_CONSISTENCY_GUARD_SLEEP_TIME_MS} and its methods. */ @@ -223,7 +223,7 @@ public ConsistencyGuardConfig build() { * @deprecated use {@link #OPTIMISTIC_CONSISTENCY_GUARD_SLEEP_TIME_MS} and its methods. */ @Deprecated - private static long DEFAULT_OPTIMISTIC_CONSISTENCY_GUARD_SLEEP_TIME_MS_PROP = OPTIMISTIC_CONSISTENCY_GUARD_SLEEP_TIME_MS.defaultValue(); + private static final long DEFAULT_OPTIMISTIC_CONSISTENCY_GUARD_SLEEP_TIME_MS_PROP = OPTIMISTIC_CONSISTENCY_GUARD_SLEEP_TIME_MS.defaultValue(); /** * @deprecated use {@link #OPTIMISTIC_CONSISTENCY_GUARD_ENABLE} and its methods. */ @@ -233,5 +233,5 @@ public ConsistencyGuardConfig build() { * @deprecated use {@link #OPTIMISTIC_CONSISTENCY_GUARD_ENABLE} and its methods. */ @Deprecated - private static boolean DEFAULT_ENABLE_OPTIMISTIC_CONSISTENCY_GUARD = OPTIMISTIC_CONSISTENCY_GUARD_ENABLE.defaultValue(); + private static final boolean DEFAULT_ENABLE_OPTIMISTIC_CONSISTENCY_GUARD = OPTIMISTIC_CONSISTENCY_GUARD_ENABLE.defaultValue(); } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/fs/SizeAwareDataOutputStream.java b/hudi-common/src/main/java/org/apache/hudi/common/fs/SizeAwareDataOutputStream.java index 350665d2521c..b528540b0327 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/fs/SizeAwareDataOutputStream.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/fs/SizeAwareDataOutputStream.java @@ -30,9 +30,9 @@ public class SizeAwareDataOutputStream { // Actual outputStream - private DataOutputStream outputStream; + private final DataOutputStream outputStream; // Counter to keep track of number of bytes written - private AtomicLong size; + private final AtomicLong size; public SizeAwareDataOutputStream(FileOutputStream fileOutputStream, int cacheSize) { this.outputStream = new DataOutputStream(new BufferedOutputStream(fileOutputStream, cacheSize)); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/ConsistentHashingNode.java b/hudi-common/src/main/java/org/apache/hudi/common/model/ConsistentHashingNode.java index 8c931685ac2c..b336cff816b1 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/ConsistentHashingNode.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/ConsistentHashingNode.java @@ -79,11 +79,9 @@ public NodeTag getTag() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("ConsistentHashingNode{"); - sb.append("value=").append(value); - sb.append(", fileIdPfx='").append(fileIdPrefix).append('\''); - sb.append('}'); - return sb.toString(); + return "ConsistentHashingNode{" + "value=" + value + + ", fileIdPfx='" + fileIdPrefix + '\'' + + '}'; } /** diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/DefaultHoodieRecordPayload.java b/hudi-common/src/main/java/org/apache/hudi/common/model/DefaultHoodieRecordPayload.java index 40f7558a2b69..c8da6dace661 100644 --- 
a/hudi-common/src/main/java/org/apache/hudi/common/model/DefaultHoodieRecordPayload.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/DefaultHoodieRecordPayload.java @@ -48,7 +48,7 @@ public class DefaultHoodieRecordPayload extends OverwriteWithLatestAvroPayload { public static final String DELETE_KEY = "hoodie.payload.delete.field"; public static final String DELETE_MARKER = "hoodie.payload.delete.marker"; private Option eventTime = Option.empty(); - private AtomicBoolean isDeleteComputed = new AtomicBoolean(false); + private final AtomicBoolean isDeleteComputed = new AtomicBoolean(false); private boolean isDefaultRecordPayloadDeleted = false; public DefaultHoodieRecordPayload(GenericRecord record, Comparable orderingVal) { diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/FileSlice.java b/hudi-common/src/main/java/org/apache/hudi/common/model/FileSlice.java index 6e2d16e025d9..add6ae5d79cb 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/FileSlice.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/FileSlice.java @@ -162,13 +162,11 @@ public boolean isEmpty() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("FileSlice {"); - sb.append("fileGroupId=").append(fileGroupId); - sb.append(", baseCommitTime=").append(baseInstantTime); - sb.append(", baseFile='").append(baseFile).append('\''); - sb.append(", logFiles='").append(logFiles).append('\''); - sb.append('}'); - return sb.toString(); + return "FileSlice {" + "fileGroupId=" + fileGroupId + + ", baseCommitTime=" + baseInstantTime + + ", baseFile='" + baseFile + '\'' + + ", logFiles='" + logFiles + '\'' + + '}'; } @Override diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieFileGroup.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieFileGroup.java index 1fcc003165b6..6fb8853fbaf7 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieFileGroup.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieFileGroup.java @@ -235,12 +235,10 @@ public Stream getAllBaseFiles() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("HoodieFileGroup {"); - sb.append("id=").append(fileGroupId); - sb.append(", fileSlices='").append(fileSlices).append('\''); - sb.append(", lastInstant='").append(lastInstant).append('\''); - sb.append('}'); - return sb.toString(); + return "HoodieFileGroup {" + "id=" + fileGroupId + + ", fileSlices='" + fileSlices + '\'' + + ", lastInstant='" + lastInstant + '\'' + + '}'; } public void addFileSlice(FileSlice slice) { diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieKey.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieKey.java index 1cefe761bd08..4595f2b10ca0 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieKey.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieKey.java @@ -85,10 +85,8 @@ public int hashCode() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("HoodieKey {"); - sb.append(" recordKey=").append(recordKey); - sb.append(" partitionPath=").append(partitionPath); - sb.append('}'); - return sb.toString(); + return "HoodieKey {" + " recordKey=" + recordKey + + " partitionPath=" + partitionPath + + '}'; } } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecord.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecord.java index 
84eabf78f1ab..e36d7398c9c4 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecord.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecord.java @@ -297,12 +297,10 @@ public int hashCode() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("HoodieRecord{"); - sb.append("key=").append(key); - sb.append(", currentLocation='").append(currentLocation).append('\''); - sb.append(", newLocation='").append(newLocation).append('\''); - sb.append('}'); - return sb.toString(); + return "HoodieRecord{" + "key=" + key + + ", currentLocation='" + currentLocation + '\'' + + ", newLocation='" + newLocation + '\'' + + '}'; } public String getPartitionPath() { diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecordGlobalLocation.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecordGlobalLocation.java index 08e6c6c66d82..aedbec5071c5 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecordGlobalLocation.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecordGlobalLocation.java @@ -47,13 +47,11 @@ public HoodieRecordGlobalLocation(String partitionPath, String instantTime, Stri @Override public String toString() { - final StringBuilder sb = new StringBuilder("HoodieGlobalRecordLocation {"); - sb.append("partitionPath=").append(partitionPath).append(", "); - sb.append("instantTime=").append(instantTime).append(", "); - sb.append("fileId=").append(fileId).append(", "); - sb.append("position=").append(position); - sb.append('}'); - return sb.toString(); + return "HoodieGlobalRecordLocation {" + "partitionPath=" + partitionPath + ", " + + "instantTime=" + instantTime + ", " + + "fileId=" + fileId + ", " + + "position=" + position + + '}'; } @Override diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecordLocation.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecordLocation.java index 16417db63da8..499022abfd68 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecordLocation.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieRecordLocation.java @@ -71,12 +71,10 @@ public int hashCode() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("HoodieRecordLocation {"); - sb.append("instantTime=").append(instantTime).append(", "); - sb.append("fileId=").append(fileId).append(", "); - sb.append("position=").append(position); - sb.append('}'); - return sb.toString(); + return "HoodieRecordLocation {" + "instantTime=" + instantTime + ", " + + "fileId=" + fileId + ", " + + "position=" + position + + '}'; } public String getInstantTime() { diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/OverwriteWithLatestAvroPayload.java b/hudi-common/src/main/java/org/apache/hudi/common/model/OverwriteWithLatestAvroPayload.java index b0ed1ef22a22..d8977c967365 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/OverwriteWithLatestAvroPayload.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/OverwriteWithLatestAvroPayload.java @@ -62,7 +62,7 @@ public Option getInsertValue(Schema schema) throws IOException { return Option.empty(); } - return Option.of((IndexedRecord) HoodieAvroUtils.bytesToAvro(recordBytes, schema)); + return Option.of(HoodieAvroUtils.bytesToAvro(recordBytes, schema)); } /** diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/PartialUpdateAvroPayload.java 
b/hudi-common/src/main/java/org/apache/hudi/common/model/PartialUpdateAvroPayload.java index 8c4801485101..522eea451f99 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/PartialUpdateAvroPayload.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/PartialUpdateAvroPayload.java @@ -216,7 +216,7 @@ public Option getInsertValue(Schema schema, boolean isPreCombinin return Option.empty(); } - return Option.of((IndexedRecord) HoodieAvroUtils.bytesToAvro(recordBytes, schema)); + return Option.of(HoodieAvroUtils.bytesToAvro(recordBytes, schema)); } /** diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/RewriteAvroPayload.java b/hudi-common/src/main/java/org/apache/hudi/common/model/RewriteAvroPayload.java index 2f28ab2abe3b..e453dde1eb86 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/model/RewriteAvroPayload.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/model/RewriteAvroPayload.java @@ -31,7 +31,7 @@ */ public class RewriteAvroPayload implements HoodieRecordPayload { - private GenericRecord record; + private final GenericRecord record; public RewriteAvroPayload(GenericRecord record) { this.record = record; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java index 8a49d012b716..f9cdbb3ca958 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java @@ -784,12 +784,10 @@ public int hashCode() { @Override public String toString() { - final StringBuilder sb = new StringBuilder("HoodieTableMetaClient{"); - sb.append("basePath='").append(basePath).append('\''); - sb.append(", metaPath='").append(metaPath).append('\''); - sb.append(", tableType=").append(tableType); - sb.append('}'); - return sb.toString(); + return "HoodieTableMetaClient{" + "basePath='" + basePath + '\'' + + ", metaPath='" + metaPath + '\'' + + ", tableType=" + tableType + + '}'; } public void initializeBootstrapDirsIfNotExists() throws IOException { @@ -1004,7 +1002,7 @@ public static class TableBuilder { * Persist the configs that is written at the first time, and should not be changed. * Like KeyGenerator's configs. 
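The toString() rewrites above replace explicit StringBuilder chains with a single concatenation expression; for a one-shot expression the compiler generates equivalent concatenation code, so only readability changes. A before/after sketch on a hypothetical value class:

// Hypothetical value class showing the two equivalent toString() styles.
public class FileSliceInfo {
  private final String fileGroupId;
  private final String baseCommitTime;

  public FileSliceInfo(String fileGroupId, String baseCommitTime) {
    this.fileGroupId = fileGroupId;
    this.baseCommitTime = baseCommitTime;
  }

  // Explicit builder: verbose, but identical output.
  public String toVerboseString() {
    final StringBuilder sb = new StringBuilder("FileSliceInfo{");
    sb.append("fileGroupId=").append(fileGroupId);
    sb.append(", baseCommitTime=").append(baseCommitTime);
    sb.append('}');
    return sb.toString();
  }

  @Override
  public String toString() {
    // Single expression; the compiler emits the concatenation machinery.
    return "FileSliceInfo{" + "fileGroupId=" + fileGroupId
        + ", baseCommitTime=" + baseCommitTime
        + '}';
  }
}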
*/ - private Properties others = new Properties(); + private final Properties others = new Properties(); TableBuilder() { } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCInferenceCase.java b/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCInferenceCase.java index 6722860ad8ef..bf4267a8e7fc 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCInferenceCase.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCInferenceCase.java @@ -72,6 +72,6 @@ public enum HoodieCDCInferenceCase { BASE_FILE_INSERT, BASE_FILE_DELETE, LOG_FILE, - REPLACE_COMMIT; + REPLACE_COMMIT } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieFileSliceReader.java b/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieFileSliceReader.java index a988d7e41943..4d0aaf05b5e3 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieFileSliceReader.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieFileSliceReader.java @@ -36,13 +36,13 @@ import java.util.Properties; public class HoodieFileSliceReader extends LogFileIterator { - private Option> baseFileIterator; - private HoodieMergedLogRecordScanner scanner; - private Schema schema; - private Properties props; + private final Option> baseFileIterator; + private final HoodieMergedLogRecordScanner scanner; + private final Schema schema; + private final Properties props; - private TypedProperties payloadProps = new TypedProperties(); - private Option> simpleKeyGenFieldsOpt; + private final TypedProperties payloadProps = new TypedProperties(); + private final Option> simpleKeyGenFieldsOpt; Map records; HoodieRecordMerger merger; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFileReader.java b/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFileReader.java index 094b50e07055..5202f68e0279 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFileReader.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFileReader.java @@ -83,7 +83,7 @@ public class HoodieLogFileReader implements HoodieLogFormat.Reader { private final boolean reverseReader; private final boolean enableRecordLookups; private boolean closed = false; - private SeekableDataInputStream inputStream; + private final SeekableDataInputStream inputStream; public HoodieLogFileReader(HoodieStorage storage, HoodieLogFile logFile, Schema readerSchema, int bufferSize) throws IOException { this(storage, logFile, readerSchema, bufferSize, false); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFormatReverseReader.java b/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFormatReverseReader.java index 433f2e746563..b746eabd7324 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFormatReverseReader.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/log/HoodieLogFormatReverseReader.java @@ -49,7 +49,7 @@ public class HoodieLogFormatReverseReader implements HoodieLogFormat.Reader { private final boolean reverseLogReader; private final String recordKeyField; private final boolean enableInlineReading; - private int bufferSize; + private final int bufferSize; private int logFilePos = -1; HoodieLogFormatReverseReader(HoodieStorage storage, List logFiles, Schema readerSchema, diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieDataBlock.java b/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieDataBlock.java index b413d82fd2fa..887d17d268a0 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieDataBlock.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieDataBlock.java @@ -73,7 +73,7 @@ public abstract class HoodieDataBlock extends HoodieLogBlock { protected final boolean shouldWriteRecordPositions; // Map of string schema to parsed schema. - private static ConcurrentHashMap schemaMap = new ConcurrentHashMap<>(); + private static final ConcurrentHashMap SCHEMA_MAP = new ConcurrentHashMap<>(); /** * NOTE: This ctor is used on the write-path (ie when records ought to be written into the log) @@ -313,8 +313,8 @@ protected Option getRecordKey(HoodieRecord record) { protected Schema getSchemaFromHeader() { String schemaStr = getLogBlockHeader().get(HeaderMetadataType.SCHEMA); - schemaMap.computeIfAbsent(schemaStr, (schemaString) -> new Schema.Parser().parse(schemaString)); - return schemaMap.get(schemaStr); + SCHEMA_MAP.computeIfAbsent(schemaStr, (schemaString) -> new Schema.Parser().parse(schemaString)); + return SCHEMA_MAP.get(schemaStr); } /** diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieDeleteBlock.java b/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieDeleteBlock.java index 3f5bfe51c788..311e6bdd96e6 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieDeleteBlock.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieDeleteBlock.java @@ -175,10 +175,10 @@ private byte[] serializeV3() throws IOException { private static DeleteRecord[] deserialize(int version, byte[] data) throws IOException { if (version == 1) { // legacy version - HoodieKey[] keys = SerializationUtils.deserialize(data); + HoodieKey[] keys = SerializationUtils.deserialize(data); return Arrays.stream(keys).map(DeleteRecord::create).toArray(DeleteRecord[]::new); } else if (version == 2) { - return SerializationUtils.deserialize(data); + return SerializationUtils.deserialize(data); } else { DatumReader reader = new SpecificDatumReader<>(HoodieDeleteRecordList.class); BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(data, 0, data.length, null); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/read/HoodiePositionBasedFileGroupRecordBuffer.java b/hudi-common/src/main/java/org/apache/hudi/common/table/read/HoodiePositionBasedFileGroupRecordBuffer.java index df6097eb7f78..6d2ca28d7bbf 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/read/HoodiePositionBasedFileGroupRecordBuffer.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/read/HoodiePositionBasedFileGroupRecordBuffer.java @@ -268,13 +268,8 @@ protected boolean shouldSkip(T record, String keyFieldName, boolean isFullKey, S } // When the record key matches with one of the keys or key prefixes, can not skip. - if ((isFullKey && keys.contains(recordKey)) - || (!isFullKey && keys.stream().anyMatch(recordKey::startsWith))) { - return false; - } - - // Otherwise, this record is not needed. 
- return true; + return (!isFullKey || !keys.contains(recordKey)) + && (isFullKey || keys.stream().noneMatch(recordKey::startsWith)); } /** diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/ArchivedTimelineLoader.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/ArchivedTimelineLoader.java index 96a765e4085d..5f6bf00ca755 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/ArchivedTimelineLoader.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/ArchivedTimelineLoader.java @@ -18,10 +18,12 @@ package org.apache.hudi.common.table.timeline; -import org.apache.avro.generic.GenericRecord; import org.apache.hudi.common.table.HoodieTableMetaClient; +import org.apache.avro.generic.GenericRecord; + import javax.annotation.Nullable; + import java.io.Serializable; import java.util.function.BiConsumer; import java.util.function.Function; @@ -37,7 +39,7 @@ public interface ArchivedTimelineLoader extends Serializable { * @param commitsFilter Filter of the instant type. * @param recordConsumer Consumer of the instant record payload. */ - public void loadInstants( + void loadInstants( HoodieTableMetaClient metaClient, @Nullable HoodieArchivedTimeline.TimeRangeFilter filter, HoodieArchivedTimeline.LoadMode loadMode, diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/CompletionTimeQueryView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/CompletionTimeQueryView.java index 8e9f229030eb..1b4b99146e69 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/CompletionTimeQueryView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/CompletionTimeQueryView.java @@ -26,22 +26,22 @@ public interface CompletionTimeQueryView extends AutoCloseable { - public boolean isCompleted(String beginInstantTime); + boolean isCompleted(String beginInstantTime); /** * Returns whether the instant is archived. */ - public boolean isArchived(String instantTime); + boolean isArchived(String instantTime); /** * Returns whether the give instant time {@code instantTime} completed before the base instant {@code baseInstant}. */ - public boolean isCompletedBefore(String baseInstant, String instantTime); + boolean isCompletedBefore(String baseInstant, String instantTime); /** * Returns whether the given instant time {@code instantTime} is sliced after or on the base instant {@code baseInstant}. */ - public boolean isSlicedAfterOrOn(String baseInstant, String instantTime); + boolean isSlicedAfterOrOn(String baseInstant, String instantTime); /** * Get completion time with a base instant time as a reference to fix the compatibility. @@ -51,7 +51,7 @@ public interface CompletionTimeQueryView extends AutoCloseable { * * @return Probability fixed completion time. */ - public Option getCompletionTime(String baseInstant, String instantTime); + Option getCompletionTime(String baseInstant, String instantTime); /** * Queries the completion time with given instant time. @@ -60,7 +60,7 @@ public interface CompletionTimeQueryView extends AutoCloseable { * * @return The completion time if the instant finished or empty if it is still pending. */ - public Option getCompletionTime(String beginTime); + Option getCompletionTime(String beginTime); /** * Queries the instant times with given completion time range. @@ -74,7 +74,7 @@ public interface CompletionTimeQueryView extends AutoCloseable { * * @return The sorted instant time list. 
*/ - public List getInstantTimes( + List getInstantTimes( HoodieTimeline timeline, Option startCompletionTime, Option endCompletionTime, @@ -90,7 +90,7 @@ public List getInstantTimes( * * @return The sorted instant time list. */ - public List getInstantTimes( + List getInstantTimes( String startCompletionTime, String endCompletionTime, Function earliestInstantTimeFunc); @@ -99,11 +99,11 @@ public List getInstantTimes( * Get Cursor Instant * @return */ - public String getCursorInstant(); + String getCursorInstant(); /** * Return true if the table is empty. * @return */ - public boolean isEmptyTable(); + boolean isEmptyTable(); } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java index 37d893517b7f..9a213ec4f425 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieActiveTimeline.java @@ -39,28 +39,28 @@ public interface HoodieActiveTimeline extends HoodieTimeline { * Return Valid extensions expected in active timeline. * @return */ - public Set getValidExtensionsInActiveTimeline(); + Set getValidExtensionsInActiveTimeline(); /** * Create a complete instant and save to storage with a completion time. * @param instant the complete instant. */ - public void createCompleteInstant(HoodieInstant instant); + void createCompleteInstant(HoodieInstant instant); /** * Create a pending instant and save to storage. * @param instant the pending instant. */ - public void createNewInstant(HoodieInstant instant); + void createNewInstant(HoodieInstant instant); - public void createRequestedCommitWithReplaceMetadata(String instantTime, String actionType); + void createRequestedCommitWithReplaceMetadata(String instantTime, String actionType); /** * Save Completed instant in active timeline. * @param instant Instant to be saved. * @param data Metadata to be written in the instant file. */ - public void saveAsComplete(HoodieInstant instant, Option data); + void saveAsComplete(HoodieInstant instant, Option data); /** * Save Completed instant in active timeline. @@ -68,80 +68,80 @@ public interface HoodieActiveTimeline extends HoodieTimeline { * @param instant Instant to be saved. * @param data Metadata to be written in the instant file. */ - public void saveAsComplete(boolean shouldLock, HoodieInstant instant, Option data); + void saveAsComplete(boolean shouldLock, HoodieInstant instant, Option data); /** * Delete Compaction requested instant file from timeline. * @param instant Instant to be deleted. */ - public HoodieInstant revertToInflight(HoodieInstant instant); + HoodieInstant revertToInflight(HoodieInstant instant); /** * Delete inflight instant file from timeline. * @param instant Instant to be deleted. */ - public void deleteInflight(HoodieInstant instant); + void deleteInflight(HoodieInstant instant); /** * Delete pending instant file from timeline. * @param instant Instant to be deleted. */ - public void deletePending(HoodieInstant instant); + void deletePending(HoodieInstant instant); /** * Delete completed rollback instant file from timeline. * @param instant Instant to be deleted. */ - public void deleteCompletedRollback(HoodieInstant instant); + void deleteCompletedRollback(HoodieInstant instant); /** * Delete empty instant file from timeline. * @param instant Instant to be deleted. 
*/ - public void deleteEmptyInstantIfExists(HoodieInstant instant); + void deleteEmptyInstantIfExists(HoodieInstant instant); /** * Delete Compaction requested instant file from timeline. * @param instant Instant to be deleted. */ - public void deleteCompactionRequested(HoodieInstant instant); + void deleteCompactionRequested(HoodieInstant instant); /** * Note: This method should only be used in the case that delete requested/inflight instant or empty clean instant, * and completed commit instant in an archive operation. */ - public void deleteInstantFileIfExists(HoodieInstant instant); + void deleteInstantFileIfExists(HoodieInstant instant); /** * Returns most recent instant having valid schema in its {@link HoodieCommitMetadata} */ - public Option> getLastCommitMetadataWithValidSchema(); + Option> getLastCommitMetadataWithValidSchema(); /** * Get the last instant with valid data, and convert this to HoodieCommitMetadata */ - public Option> getLastCommitMetadataWithValidData(); + Option> getLastCommitMetadataWithValidData(); /** * Read cleaner Info from instant file. * @param instant Instant to read from. * @return */ - public Option readCleanerInfoAsBytes(HoodieInstant instant); + Option readCleanerInfoAsBytes(HoodieInstant instant); /** * Read rollback info from instant file. * @param instant Instant to read from. * @return */ - public Option readRollbackInfoAsBytes(HoodieInstant instant); + Option readRollbackInfoAsBytes(HoodieInstant instant); /** * Read Restore info from instant file. * @param instant Instant to read from. * @return */ - public Option readRestoreInfoAsBytes(HoodieInstant instant); + Option readRestoreInfoAsBytes(HoodieInstant instant); //----------------------------------------------------------------- // BEGIN - COMPACTION RELATED META-DATA MANAGEMENT. @@ -152,14 +152,14 @@ public interface HoodieActiveTimeline extends HoodieTimeline { * @param instant Instant to read from. * @return */ - public Option readCompactionPlanAsBytes(HoodieInstant instant); + Option readCompactionPlanAsBytes(HoodieInstant instant); /** * Read Index Plan from instant file. * @param instant Instant to read from. * @return */ - public Option readIndexPlanAsBytes(HoodieInstant instant); + Option readIndexPlanAsBytes(HoodieInstant instant); /** * Revert instant state from inflight to requested. @@ -167,7 +167,7 @@ public interface HoodieActiveTimeline extends HoodieTimeline { * @param inflightInstant Inflight Instant * @return requested instant */ - public HoodieInstant revertInstantFromInflightToRequested(HoodieInstant inflightInstant); + HoodieInstant revertInstantFromInflightToRequested(HoodieInstant inflightInstant); /** * TODO: This method is not needed, since log compaction plan is not a immutable plan. @@ -176,7 +176,7 @@ public interface HoodieActiveTimeline extends HoodieTimeline { * @param inflightInstant Inflight Instant * @return requested instant */ - public HoodieInstant revertLogCompactionInflightToRequested(HoodieInstant inflightInstant); + HoodieInstant revertLogCompactionInflightToRequested(HoodieInstant inflightInstant); /** * Transition Compaction State from requested to inflight. 
@@ -184,7 +184,7 @@ public interface HoodieActiveTimeline extends HoodieTimeline { * @param requestedInstant Requested instant * @return inflight instant */ - public HoodieInstant transitionCompactionRequestedToInflight(HoodieInstant requestedInstant); + HoodieInstant transitionCompactionRequestedToInflight(HoodieInstant requestedInstant); /** * Transition LogCompaction State from requested to inflight. @@ -192,7 +192,7 @@ public interface HoodieActiveTimeline extends HoodieTimeline { * @param requestedInstant Requested instant * @return inflight instant */ - public HoodieInstant transitionLogCompactionRequestedToInflight(HoodieInstant requestedInstant); + HoodieInstant transitionLogCompactionRequestedToInflight(HoodieInstant requestedInstant); /** * Transition Compaction State from inflight to Committed. @@ -202,8 +202,8 @@ public interface HoodieActiveTimeline extends HoodieTimeline { * @param data Extra Metadata * @return commit instant */ - public HoodieInstant transitionCompactionInflightToComplete(boolean shouldLock, HoodieInstant inflightInstant, - Option data); + HoodieInstant transitionCompactionInflightToComplete(boolean shouldLock, HoodieInstant inflightInstant, + Option data); /** * Transition Log Compaction State from inflight to Committed. @@ -213,8 +213,8 @@ public HoodieInstant transitionCompactionInflightToComplete(boolean shouldLock, * @param data Extra Metadata * @return commit instant */ - public HoodieInstant transitionLogCompactionInflightToComplete(boolean shouldLock, - HoodieInstant inflightInstant, Option data); + HoodieInstant transitionLogCompactionInflightToComplete(boolean shouldLock, + HoodieInstant inflightInstant, Option data); //----------------------------------------------------------------- // END - COMPACTION RELATED META-DATA MANAGEMENT @@ -228,8 +228,8 @@ public HoodieInstant transitionLogCompactionInflightToComplete(boolean shouldLoc * @param data Extra Metadata * @return commit instant */ - public HoodieInstant transitionCleanInflightToComplete(boolean shouldLock, HoodieInstant inflightInstant, - Option data); + HoodieInstant transitionCleanInflightToComplete(boolean shouldLock, HoodieInstant inflightInstant, + Option data); /** * Transition Clean State from requested to inflight. @@ -238,7 +238,7 @@ public HoodieInstant transitionCleanInflightToComplete(boolean shouldLock, Hoodi * @param data Optional data to be stored * @return commit instant */ - public HoodieInstant transitionCleanRequestedToInflight(HoodieInstant requestedInstant, Option data); + HoodieInstant transitionCleanRequestedToInflight(HoodieInstant requestedInstant, Option data); /** * Transition Rollback State from inflight to Committed. @@ -248,8 +248,8 @@ public HoodieInstant transitionCleanInflightToComplete(boolean shouldLock, Hoodi * @param data Extra Metadata * @return commit instant */ - public HoodieInstant transitionRollbackInflightToComplete(boolean shouldLock, - HoodieInstant inflightInstant, Option data); + HoodieInstant transitionRollbackInflightToComplete(boolean shouldLock, + HoodieInstant inflightInstant, Option data); /** * Transition Rollback State from requested to inflight. 
@@ -257,7 +257,7 @@ public HoodieInstant transitionRollbackInflightToComplete(boolean shouldLock, * @param requestedInstant requested instant * @return commit instant */ - public HoodieInstant transitionRollbackRequestedToInflight(HoodieInstant requestedInstant); + HoodieInstant transitionRollbackRequestedToInflight(HoodieInstant requestedInstant); /** * Transition Restore State from requested to inflight. @@ -265,7 +265,7 @@ public HoodieInstant transitionRollbackInflightToComplete(boolean shouldLock, * @param requestedInstant requested instant * @return commit instant */ - public HoodieInstant transitionRestoreRequestedToInflight(HoodieInstant requestedInstant); + HoodieInstant transitionRestoreRequestedToInflight(HoodieInstant requestedInstant); /** * Transition replace requested file to replace inflight. @@ -274,7 +274,7 @@ public HoodieInstant transitionRollbackInflightToComplete(boolean shouldLock, * @param data Extra Metadata * @return inflight instant */ - public HoodieInstant transitionReplaceRequestedToInflight(HoodieInstant requestedInstant, Option data); + HoodieInstant transitionReplaceRequestedToInflight(HoodieInstant requestedInstant, Option data); /** * Transition cluster requested file to cluster inflight. @@ -283,7 +283,7 @@ public HoodieInstant transitionRollbackInflightToComplete(boolean shouldLock, * @param data Extra Metadata * @return inflight instant */ - public HoodieInstant transitionClusterRequestedToInflight(HoodieInstant requestedInstant, Option data); + HoodieInstant transitionClusterRequestedToInflight(HoodieInstant requestedInstant, Option data); /** * Transition replace inflight to Committed. @@ -293,8 +293,8 @@ public HoodieInstant transitionRollbackInflightToComplete(boolean shouldLock, * @param data Extra Metadata * @return commit instant */ - public HoodieInstant transitionReplaceInflightToComplete(boolean shouldLock, - HoodieInstant inflightInstant, Option data); + HoodieInstant transitionReplaceInflightToComplete(boolean shouldLock, + HoodieInstant inflightInstant, Option data); /** * Transition cluster inflight to replace committed. @@ -304,37 +304,37 @@ public HoodieInstant transitionReplaceInflightToComplete(boolean shouldLock, * @param data Extra Metadata * @return commit instant */ - public HoodieInstant transitionClusterInflightToComplete(boolean shouldLock, - HoodieInstant inflightInstant, Option data); + HoodieInstant transitionClusterInflightToComplete(boolean shouldLock, + HoodieInstant inflightInstant, Option data); /** * Save Restore requested instant with metadata. * @param commitType Instant type. * @param inFlightInstant Instant timestamp. */ - public void transitionRequestedToInflight(String commitType, String inFlightInstant); + void transitionRequestedToInflight(String commitType, String inFlightInstant); /** * Save Restore requested instant with metadata. * @param requested Instant to save. * @param content Metadata to be stored in instant file. */ - public void transitionRequestedToInflight(HoodieInstant requested, Option content); + void transitionRequestedToInflight(HoodieInstant requested, Option content); /** * Save Restore requested instant with metadata. * @param requested Instant to save. * @param content Metadata to be stored in instant file. 
*/ - public void transitionRequestedToInflight(HoodieInstant requested, Option content, - boolean allowRedundantTransitions); + void transitionRequestedToInflight(HoodieInstant requested, Option content, + boolean allowRedundantTransitions); /** * Save Compaction requested instant with metadata. * @param instant Instant to save. * @param content Metadata to be stored in instant file. */ - public void saveToCompactionRequested(HoodieInstant instant, Option content); + void saveToCompactionRequested(HoodieInstant instant, Option content); /** * Save Compaction requested instant with metadata. @@ -342,14 +342,14 @@ public void transitionRequestedToInflight(HoodieInstant requested, Option content, boolean overwrite); + void saveToCompactionRequested(HoodieInstant instant, Option content, boolean overwrite); /** * Save Log Compaction requested instant with metadata. * @param instant Instant to save. * @param content Metadata to be stored in instant file. */ - public void saveToLogCompactionRequested(HoodieInstant instant, Option content); + void saveToLogCompactionRequested(HoodieInstant instant, Option content); /** * Save Log Compaction requested instant with metadata. @@ -357,42 +357,42 @@ public void transitionRequestedToInflight(HoodieInstant requested, Option content, boolean overwrite); + void saveToLogCompactionRequested(HoodieInstant instant, Option content, boolean overwrite); /** * Save pending replace instant with metadata. * @param instant Instant to save. * @param content Metadata to be stored in instant file. */ - public void saveToPendingReplaceCommit(HoodieInstant instant, Option content); + void saveToPendingReplaceCommit(HoodieInstant instant, Option content); /** * Save pending cluster instant with metadata. * @param instant Instant to save. * @param content Metadata to be stored in instant file. */ - public void saveToPendingClusterCommit(HoodieInstant instant, Option content); + void saveToPendingClusterCommit(HoodieInstant instant, Option content); /** * Save clean requested instant with metadata. * @param instant Instant to save. * @param content Metadata to be stored in instant file. */ - public void saveToCleanRequested(HoodieInstant instant, Option content); + void saveToCleanRequested(HoodieInstant instant, Option content); /** * Save rollback requested instant with metadata. * @param instant Instant to save. * @param content Metadata to be stored in instant file. */ - public void saveToRollbackRequested(HoodieInstant instant, Option content); + void saveToRollbackRequested(HoodieInstant instant, Option content); /** * Save Restore requested instant with metadata. * @param instant Instant to save. * @param content Metadata to be stored in instant file. */ - public void saveToRestoreRequested(HoodieInstant instant, Option content); + void saveToRestoreRequested(HoodieInstant instant, Option content); /** * Transition index instant state from requested to inflight. @@ -400,7 +400,7 @@ public void transitionRequestedToInflight(HoodieInstant requested, Option data); + HoodieInstant transitionIndexRequestedToInflight(HoodieInstant requestedInstant, Option data); /** * Transition index instant state from inflight to completed. @@ -409,33 +409,33 @@ public void transitionRequestedToInflight(HoodieInstant requested, Option data); + HoodieInstant transitionIndexInflightToComplete(boolean shouldLock, + HoodieInstant inflightInstant, Option data); /** * Revert index instant state from inflight to requested. 
* @param inflightInstant Inflight Instant * @return requested instant */ - public HoodieInstant revertIndexInflightToRequested(HoodieInstant inflightInstant); + HoodieInstant revertIndexInflightToRequested(HoodieInstant inflightInstant); /** * Save content for inflight/requested index instant. */ - public void saveToPendingIndexAction(HoodieInstant instant, Option content); + void saveToPendingIndexAction(HoodieInstant instant, Option content); /** * Reloads timeline from storage * @return */ - public HoodieActiveTimeline reload(); + HoodieActiveTimeline reload(); /** * Copies instant file from active timeline to destination directory. * @param instant Instant to copy. * @param dstDir Destination location. */ - public void copyInstant(HoodieInstant instant, StoragePath dstDir); + void copyInstant(HoodieInstant instant, StoragePath dstDir); /** * Valid Extensions in active timeline. diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieArchivedTimeline.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieArchivedTimeline.java index e976123dfbe7..82557ac46fef 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieArchivedTimeline.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieArchivedTimeline.java @@ -23,28 +23,28 @@ public interface HoodieArchivedTimeline extends HoodieTimeline { - public static final String COMPLETION_TIME_ARCHIVED_META_FIELD = "completionTime"; + String COMPLETION_TIME_ARCHIVED_META_FIELD = "completionTime"; - public void loadInstantDetailsInMemory(String startTs, String endTs); + void loadInstantDetailsInMemory(String startTs, String endTs); - public void loadCompletedInstantDetailsInMemory(); + void loadCompletedInstantDetailsInMemory(); - public void loadCompactionDetailsInMemory(String compactionInstantTime); + void loadCompactionDetailsInMemory(String compactionInstantTime); - public void loadCompactionDetailsInMemory(String startTs, String endTs); + void loadCompactionDetailsInMemory(String startTs, String endTs); - public void clearInstantDetailsFromMemory(String instantTime); + void clearInstantDetailsFromMemory(String instantTime); - public void clearInstantDetailsFromMemory(String startTs, String endTs); + void clearInstantDetailsFromMemory(String startTs, String endTs); - public HoodieArchivedTimeline reload(); + HoodieArchivedTimeline reload(); - public HoodieArchivedTimeline reload(String startTs); + HoodieArchivedTimeline reload(String startTs); /** * Different mode for loading the archived instant metadata. */ - public enum LoadMode { + enum LoadMode { /** * Loads the instantTime, completionTime. */ @@ -70,7 +70,7 @@ public enum LoadMode { /** * A time based filter with range (startTs, endTs]. */ - public static class TimeRangeFilter { + class TimeRangeFilter { protected final String startTs; protected final String endTs; @@ -87,7 +87,7 @@ public boolean isInRange(String instantTime) { /** * A time based filter with range [startTs, endTs). */ - public static class ClosedOpenTimeRangeFilter extends TimeRangeFilter { + class ClosedOpenTimeRangeFilter extends TimeRangeFilter { public ClosedOpenTimeRangeFilter(String startTs, String endTs) { super(startTs, endTs); @@ -101,7 +101,7 @@ public boolean isInRange(String instantTime) { /** * A time based filter with range [startTs, +∞). 
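The timeline interface cleanups above drop modifiers the language already implies: interface methods are implicitly public and abstract, interface fields are implicitly public static final, and nested types declared in an interface are implicitly public and static. A compact illustration on a hypothetical interface, not the Hudi API:

// Hypothetical interface: the commented declarations are exactly equivalent to the ones kept.
public interface ArchiveView {

  // Implicitly public static final.
  String COMPLETION_TIME_FIELD = "completionTime";
  // public static final String COMPLETION_TIME_FIELD = "completionTime";  // redundant form

  // Implicitly public abstract.
  void loadDetails(String startTs, String endTs);
  // public abstract void loadDetails(String startTs, String endTs);       // redundant form

  // Nested types in an interface are implicitly public and static.
  enum LoadMode {
    SLIM,
    FULL
  }

  class TimeRangeFilter {
    protected final String startTs;
    protected final String endTs;

    public TimeRangeFilter(String startTs, String endTs) {
      this.startTs = startTs;
      this.endTs = endTs;
    }

    public boolean isInRange(String instantTime) {
      return instantTime.compareTo(startTs) > 0 && instantTime.compareTo(endTs) <= 0;
    }
  }
}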
*/ - public static class StartTsFilter extends TimeRangeFilter { + class StartTsFilter extends TimeRangeFilter { public StartTsFilter(String startTs) { super(startTs, null); // endTs is never used diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieInstantTimeGenerator.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieInstantTimeGenerator.java index d1cd7d48d03b..f1fa03095d9f 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieInstantTimeGenerator.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/HoodieInstantTimeGenerator.java @@ -48,13 +48,13 @@ public class HoodieInstantTimeGenerator { public static final int MILLIS_INSTANT_TIMESTAMP_FORMAT_LENGTH = MILLIS_INSTANT_TIMESTAMP_FORMAT.length(); // Formatter to generate Instant timestamps // Unfortunately millisecond format is not parsable as is https://bugs.openjdk.java.net/browse/JDK-8031085. hence have to do appendValue() - private static DateTimeFormatter MILLIS_INSTANT_TIME_FORMATTER = new DateTimeFormatterBuilder().appendPattern(SECS_INSTANT_TIMESTAMP_FORMAT) + private static final DateTimeFormatter MILLIS_INSTANT_TIME_FORMATTER = new DateTimeFormatterBuilder().appendPattern(SECS_INSTANT_TIMESTAMP_FORMAT) .appendValue(ChronoField.MILLI_OF_SECOND, 3).toFormatter(); private static final String MILLIS_GRANULARITY_DATE_FORMAT = "yyyy-MM-dd HH:mm:ss.SSS"; - private static DateTimeFormatter MILLIS_GRANULARITY_DATE_FORMATTER = DateTimeFormatter.ofPattern(MILLIS_GRANULARITY_DATE_FORMAT); + private static final DateTimeFormatter MILLIS_GRANULARITY_DATE_FORMATTER = DateTimeFormatter.ofPattern(MILLIS_GRANULARITY_DATE_FORMAT); // The last Instant timestamp generated - private static AtomicReference lastInstantTime = new AtomicReference<>(String.valueOf(Integer.MIN_VALUE)); + private static final AtomicReference LAST_INSTANT_TIME = new AtomicReference<>(String.valueOf(Integer.MIN_VALUE)); // The default number of milliseconds that we add if they are not present // We prefer the max timestamp as it mimics the current behavior with second granularity @@ -72,7 +72,7 @@ public class HoodieInstantTimeGenerator { * @param milliseconds Milliseconds to add to current time while generating the new instant time */ public static String createNewInstantTime(boolean shouldLock, TimeGenerator timeGenerator, long milliseconds) { - return lastInstantTime.updateAndGet((oldVal) -> { + return LAST_INSTANT_TIME.updateAndGet((oldVal) -> { String newCommitTime; do { Date d = new Date(timeGenerator.generateTime(!shouldLock) + milliseconds); @@ -157,7 +157,7 @@ public static String getInstantFromTemporalAccessor(TemporalAccessor temporalAcc @VisibleForTesting public static String getLastInstantTime() { - return lastInstantTime.get(); + return LAST_INSTANT_TIME.get(); } /** diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/TimelineUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/TimelineUtils.java index 742748573345..a15edc59ba5d 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/TimelineUtils.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/TimelineUtils.java @@ -307,12 +307,12 @@ public static HoodieTimeline getCommitsTimelineAfter( ? 
metaClient.getArchivedTimeline(exclusiveStartInstantTime).mergeTimeline(writeTimeline) : writeTimeline; - HoodieTimeline timelineSinceLastSync = (HoodieTimeline) (((HoodieTimeline) timeline).getCommitsTimeline() - .findInstantsAfter(exclusiveStartInstantTime, Integer.MAX_VALUE)); + HoodieTimeline timelineSinceLastSync = timeline.getCommitsTimeline() + .findInstantsAfter(exclusiveStartInstantTime, Integer.MAX_VALUE); if (lastMaxCompletionTime.isPresent()) { // Get 'hollow' instants that have less instant time than exclusiveStartInstantTime but with greater commit completion time - HoodieTimeline hollowInstantsTimeline = ((HoodieTimeline) timeline).getCommitsTimeline() + HoodieTimeline hollowInstantsTimeline = timeline.getCommitsTimeline() .filterCompletedInstants() .filter(s -> compareTimestamps(s.requestedTime(), LESSER_THAN, exclusiveStartInstantTime)) .filter(s -> compareTimestamps(s.getCompletionTime(), GREATER_THAN, lastMaxCompletionTime.get())); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/ActiveTimelineV1.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/ActiveTimelineV1.java index d35e672d8304..5ae2a6da3e76 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/ActiveTimelineV1.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/ActiveTimelineV1.java @@ -426,8 +426,8 @@ public HoodieInstant transitionRollbackRequestedToInflight(HoodieInstant request @Override public HoodieInstant transitionRestoreRequestedToInflight(HoodieInstant requestedInstant) { ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.RESTORE_ACTION), "Transition to inflight requested for a restore instant with diff action " - + requestedInstant.toString()); - ValidationUtils.checkArgument(requestedInstant.isRequested(), "Transition to inflight requested for an instant not in requested state " + requestedInstant.toString()); + + requestedInstant); + ValidationUtils.checkArgument(requestedInstant.isRequested(), "Transition to inflight requested for an instant not in requested state " + requestedInstant); HoodieInstant inflight = instantGenerator.createNewInstant(HoodieInstant.State.INFLIGHT, RESTORE_ACTION, requestedInstant.requestedTime()); transitionState(requestedInstant, inflight, Option.empty()); return inflight; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/CompletionTimeQueryViewV1.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/CompletionTimeQueryViewV1.java index c75ad5fd59f8..f99662780176 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/CompletionTimeQueryViewV1.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/CompletionTimeQueryViewV1.java @@ -60,7 +60,7 @@ public class CompletionTimeQueryViewV1 implements CompletionTimeQueryView, Seria * a completion query for t5 would trigger lazy loading with this cursor instant updated to t5. * This sliding window model amortizes redundant loading from different queries. */ - private volatile String cursorInstant; + private final String cursorInstant; /** * The first write instant on the active timeline, used for query optimization. 
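The timeline interface cleanups above lean on the implicit modifiers the Java language gives interface members, so dropping the keywords changes nothing semantically. A minimal sketch of what the compiler infers (ExampleTimeline is a hypothetical type, not a Hudi interface):

    // Illustrative only; names are made up.
    interface ExampleTimeline {
        // Fields in an interface are implicitly public static final.
        String COMPLETION_TIME_FIELD = "completionTime";

        // Methods are implicitly public (and abstract when they have no body).
        ExampleTimeline reload();

        // Nested enums and classes are implicitly public and static.
        enum LoadMode { SLIM, METADATA, FULL }

        class TimeRangeFilter {
            protected final String startTs;
            protected final String endTs;

            public TimeRangeFilter(String startTs, String endTs) {
                this.startTs = startTs;
                this.endTs = endTs;
            }

            public boolean isInRange(String instantTime) {
                // (startTs, endTs]: strictly greater than startTs, at most endTs.
                return instantTime.compareTo(startTs) > 0 && instantTime.compareTo(endTs) <= 0;
            }
        }
    }

Style checkers such as Checkstyle's RedundantModifier rule flag the explicit forms, which is why the diff strips them.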
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v2/ActiveTimelineV2.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v2/ActiveTimelineV2.java index 497ed0f0eca8..9260ab352424 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v2/ActiveTimelineV2.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v2/ActiveTimelineV2.java @@ -450,7 +450,7 @@ public HoodieInstant transitionRollbackRequestedToInflight(HoodieInstant request public HoodieInstant transitionRestoreRequestedToInflight(HoodieInstant requestedInstant) { ValidationUtils.checkArgument(requestedInstant.getAction().equals(HoodieTimeline.RESTORE_ACTION), "Transition to inflight requested for a restore instant with diff action " + requestedInstant); - ValidationUtils.checkArgument(requestedInstant.isRequested(), "Transition to inflight requested for an instant not in requested state " + requestedInstant.toString()); + ValidationUtils.checkArgument(requestedInstant.isRequested(), "Transition to inflight requested for an instant not in requested state " + requestedInstant); HoodieInstant inflight = instantGenerator.createNewInstant(HoodieInstant.State.INFLIGHT, RESTORE_ACTION, requestedInstant.requestedTime()); transitionPendingState(requestedInstant, inflight, Option.empty()); return inflight; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTablePreCommitFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTablePreCommitFileSystemView.java index ea6b8f429bd8..5e5e9457b4c2 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTablePreCommitFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/HoodieTablePreCommitFileSystemView.java @@ -39,11 +39,11 @@ */ public class HoodieTablePreCommitFileSystemView { - private Map> partitionToReplaceFileIds; - private List filesWritten; - private String preCommitInstantTime; - private SyncableFileSystemView completedCommitsFileSystemView; - private HoodieTableMetaClient tableMetaClient; + private final Map> partitionToReplaceFileIds; + private final List filesWritten; + private final String preCommitInstantTime; + private final SyncableFileSystemView completedCommitsFileSystemView; + private final HoodieTableMetaClient tableMetaClient; /** * Create a file system view for the inflight commit that we are validating. diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java index 7ebea7b4f799..525e96b8a11c 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/RemoteHoodieTableFileSystemView.java @@ -189,7 +189,7 @@ private T executeRequest(String requestPath, Map queryParame LOG.info("Sending request : ({})", url); Response response = retryHelper != null ? 
retryHelper.start(() -> get(timeoutMs, url, method)) : get(timeoutMs, url, method); String content = response.returnContent().asString(Consts.UTF_8); - return (T) OBJECT_MAPPER.readValue(content, reference); + return OBJECT_MAPPER.readValue(content, reference); } private Map getParamsWithPartitionPath(String partitionPath) { diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/AvroSchemaCache.java b/hudi-common/src/main/java/org/apache/hudi/common/util/AvroSchemaCache.java index 538b8ed1d498..2e3bc82fc61f 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/AvroSchemaCache.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/AvroSchemaCache.java @@ -36,8 +36,8 @@ @NotThreadSafe public class AvroSchemaCache implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(AvroSchemaCache.class); - private Map versionIdToSchema; // the mapping from version_id -> schema - private Map schemaToVersionId; // the mapping from schema -> version_id + private final Map versionIdToSchema; // the mapping from version_id -> schema + private final Map schemaToVersionId; // the mapping from schema -> version_id private int nextVersionId = 0; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/ConfigUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/util/ConfigUtils.java index 1d3e3ce60229..36cbc0ab5c54 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/ConfigUtils.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/ConfigUtils.java @@ -454,8 +454,7 @@ public static String getStringWithAltKeys(TypedProperties props, public static boolean getBooleanWithAltKeys(Properties props, ConfigProperty configProperty) { Option rawValue = getRawValueWithAltKeys(props, configProperty); - boolean defaultValue = configProperty.hasDefaultValue() - ? Boolean.parseBoolean(configProperty.defaultValue().toString()) : false; + boolean defaultValue = configProperty.hasDefaultValue() && Boolean.parseBoolean(configProperty.defaultValue().toString()); return rawValue.map(v -> Boolean.parseBoolean(v.toString())).orElse(defaultValue); } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/InternalSchemaCache.java b/hudi-common/src/main/java/org/apache/hudi/common/util/InternalSchemaCache.java index 51f9587433c2..735520ebade7 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/InternalSchemaCache.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/InternalSchemaCache.java @@ -36,11 +36,11 @@ import org.apache.hudi.internal.schema.utils.SerDeHelper; import org.apache.hudi.storage.HoodieStorage; import org.apache.hudi.storage.StoragePath; +import org.apache.hudi.storage.StoragePathInfo; import com.github.benmanes.caffeine.cache.Cache; import com.github.benmanes.caffeine.cache.Caffeine; import org.apache.avro.Schema; -import org.apache.hudi.storage.StoragePathInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,11 +61,11 @@ public class InternalSchemaCache { private static final Logger LOG = LoggerFactory.getLogger(InternalSchemaCache.class); // Use segment lock to reduce competition. // the lock size should be powers of 2 for better hash. 
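As the comment above notes, the lock array is sized to a power of two so a bit mask can stand in for a modulo when picking a lock segment. A standalone sketch of the idea (SegmentLocks and its methods are hypothetical, not the Hudi cache):

    // Illustrative segment-locking sketch.
    final class SegmentLocks {
        // Power-of-two length, so (length - 1) is an all-ones bit mask.
        private static final Object[] LOCKS = new Object[16];

        static {
            for (int i = 0; i < LOCKS.length; i++) {
                LOCKS[i] = new Object();
            }
        }

        // hashCode() may be negative, but masking with (length - 1) keeps only the low bits,
        // so the index always falls in [0, LOCKS.length).
        static Object lockFor(String key) {
            return LOCKS[key.hashCode() & (LOCKS.length - 1)];
        }

        static void runExclusively(String key, Runnable action) {
            // Callers contending on different segments do not block each other.
            synchronized (lockFor(key)) {
                action.run();
            }
        }
    }

Keys that hash to different segments never serialize behind a single global lock, which is the "reduce competition" goal stated in the surrounding comments.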
- private static Object[] lockList = new Object[16]; + private static final Object[] LOCK_LIST = new Object[16]; static { - for (int i = 0; i < lockList.length; i++) { - lockList[i] = new Object(); + for (int i = 0; i < LOCK_LIST.length; i++) { + LOCK_LIST[i] = new Object(); } } @@ -90,7 +90,7 @@ public static InternalSchema searchSchemaAndCache(long versionID, HoodieTableMet } String tablePath = metaClient.getBasePath().toString(); // use segment lock to reduce competition. - synchronized (lockList[tablePath.hashCode() & (lockList.length - 1)]) { + synchronized (LOCK_LIST[tablePath.hashCode() & (LOCK_LIST.length - 1)]) { TreeMap historicalSchemas = HISTORICAL_SCHEMA_CACHE.getIfPresent(tablePath); if (historicalSchemas == null || InternalSchemaUtils.searchSchema(versionID, historicalSchemas) == null) { historicalSchemas = getHistoricalSchemas(metaClient); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/BitCaskDiskMap.java b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/BitCaskDiskMap.java index 2f1595bfe1dd..9573b5f81974 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/BitCaskDiskMap.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/BitCaskDiskMap.java @@ -306,7 +306,7 @@ public Collection values() { @Override public Stream valueStream() { final BufferedRandomAccessFile file = getRandomAccessFile(); - return valueMetadataMap.values().stream().sorted().sequential().map(valueMetaData -> (R) get(valueMetaData, file, isCompressionEnabled)); + return valueMetadataMap.values().stream().sorted().sequential().map(valueMetaData -> get(valueMetaData, file, isCompressionEnabled)); } @Override @@ -324,17 +324,17 @@ public Set> entrySet() { public static final class FileEntry { // Checksum of the value written to disk, compared during every readFromDisk to make sure no corruption - private Long crc; + private final Long crc; // Size (numberOfBytes) of the key written to disk - private Integer sizeOfKey; + private final Integer sizeOfKey; // Size (numberOfBytes) of the value written to disk - private Integer sizeOfValue; + private final Integer sizeOfValue; // Actual key - private byte[] key; + private final byte[] key; // Actual value - private byte[] value; + private final byte[] value; // Current timestamp when the value was written to disk - private Long timestamp; + private final Long timestamp; public FileEntry(long crc, int sizeOfKey, int sizeOfValue, byte[] key, byte[] value, long timestamp) { this.crc = crc; @@ -376,15 +376,15 @@ public long getTimestamp() { public static final class ValueMetadata implements Comparable { // FilePath to store the spilled data - private String filePath; + private final String filePath; // Size (numberOfBytes) of the value written to disk - private Integer sizeOfValue; + private final Integer sizeOfValue; // FilePosition of the value written to disk - private Long offsetOfValue; + private final Long offsetOfValue; // Current timestamp when the value was written to disk - private Long timestamp; + private final Long timestamp; - protected ValueMetadata(String filePath, int sizeOfValue, long offsetOfValue, long timestamp) { + private ValueMetadata(String filePath, int sizeOfValue, long offsetOfValue, long timestamp) { this.filePath = filePath; this.sizeOfValue = sizeOfValue; this.offsetOfValue = offsetOfValue; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/DiskMap.java 
b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/DiskMap.java index c8d57aec032e..b73664f0ed38 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/DiskMap.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/DiskMap.java @@ -49,7 +49,7 @@ public abstract class DiskMap im public DiskMap(String basePath, String prefix) throws IOException { this.diskMapPath = - String.format("%s/%s-%s-%s", basePath, SUBFOLDER_PREFIX, prefix, UUID.randomUUID().toString()); + String.format("%s/%s-%s-%s", basePath, SUBFOLDER_PREFIX, prefix, UUID.randomUUID()); diskMapPathFile = new File(diskMapPath); FileIOUtils.deleteDirectory(diskMapPathFile); FileIOUtils.mkdir(diskMapPathFile); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/Pair.java b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/Pair.java index cdfb0481692f..2487def09b44 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/Pair.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/Pair.java @@ -197,7 +197,7 @@ public int hashCode() { */ @Override public String toString() { - return new StringBuilder().append('(').append(getLeft()).append(',').append(getRight()).append(')').toString(); + return "(" + getLeft() + ',' + getRight() + ')'; } /** diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/RocksDBDAO.java b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/RocksDBDAO.java index 7503adc0fefd..33b45c8bf26e 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/RocksDBDAO.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/RocksDBDAO.java @@ -73,7 +73,7 @@ public class RocksDBDAO { public RocksDBDAO(String basePath, String rocksDBBasePath) { this.rocksDBBasePath = - String.format("%s/%s/%s", rocksDBBasePath, URI.create(basePath).getPath().replace(":","").replace("/", "_"), UUID.randomUUID().toString()); + String.format("%s/%s/%s", rocksDBBasePath, URI.create(basePath).getPath().replace(":","").replace("/", "_"), UUID.randomUUID()); init(); totalBytesWritten = 0L; } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/Triple.java b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/Triple.java index 9c4d47eb67b3..3c02aafaea04 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/collection/Triple.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/collection/Triple.java @@ -168,8 +168,8 @@ public int hashCode() { */ @Override public String toString() { - return new StringBuilder().append('(').append(getLeft()).append(',').append(getMiddle()).append(',') - .append(getRight()).append(')').toString(); + return "(" + getLeft() + ',' + getMiddle() + ',' + + getRight() + ')'; } /** diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/hash/JenkinsHash.java b/hudi-common/src/main/java/org/apache/hudi/common/util/hash/JenkinsHash.java index a254a78970f3..7747095a23c8 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/hash/JenkinsHash.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/hash/JenkinsHash.java @@ -40,13 +40,13 @@ * Dr. 
Dobbs Article */ public class JenkinsHash extends Hash { - private static long INT_MASK = 0x00000000ffffffffL; - private static long BYTE_MASK = 0x00000000000000ffL; + private static final long INT_MASK = 0x00000000ffffffffL; + private static final long BYTE_MASK = 0x00000000000000ffL; - private static JenkinsHash _instance = new JenkinsHash(); + private static final JenkinsHash INSTANCE = new JenkinsHash(); public static Hash getInstance() { - return _instance; + return INSTANCE; } private static long rot(long val, int pos) { @@ -89,7 +89,7 @@ public int hash(byte[] key, int nbytes, int initval) { a = b = c = (0x00000000deadbeefL + length + initval) & INT_MASK; int offset = 0; for (; length > 12; offset += 12, length -= 12) { - a = (a + (key[offset + 0] & BYTE_MASK)) & INT_MASK; + a = (a + (key[offset] & BYTE_MASK)) & INT_MASK; a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; a = (a + (((key[offset + 2] & BYTE_MASK) << 16) & INT_MASK)) & INT_MASK; a = (a + (((key[offset + 3] & BYTE_MASK) << 24) & INT_MASK)) & INT_MASK; @@ -202,7 +202,7 @@ public int hash(byte[] key, int nbytes, int initval) { case 2: a = (a + (((key[offset + 1] & BYTE_MASK) << 8) & INT_MASK)) & INT_MASK; case 1: - a = (a + (key[offset + 0] & BYTE_MASK)) & INT_MASK; + a = (a + (key[offset] & BYTE_MASK)) & INT_MASK; break; case 0: return (int) (c & INT_MASK); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/hash/MurmurHash.java b/hudi-common/src/main/java/org/apache/hudi/common/util/hash/MurmurHash.java index dcd074b881d1..f67858c1ed86 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/hash/MurmurHash.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/hash/MurmurHash.java @@ -29,10 +29,10 @@ * to Java by Andrzej Bialecki (ab at getopt org).
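The JenkinsHash changes above follow the standard Java constant conventions: values that never change become static final with UPPER_SNAKE_CASE names, and the shared instance becomes an eagerly initialized constant. A small sketch with a hypothetical hash class (not the Hudi one):

    // Illustrative eager singleton; the class name is made up.
    final class ExampleHash {
        // Constants: static final with UPPER_SNAKE_CASE names.
        private static final long INT_MASK = 0x00000000ffffffffL;
        private static final long BYTE_MASK = 0x00000000000000ffL;

        // Eagerly created shared instance; static final initialization happens during
        // class loading, which the JVM already makes thread-safe.
        private static final ExampleHash INSTANCE = new ExampleHash();

        private ExampleHash() {
        }

        static ExampleHash getInstance() {
            return INSTANCE;
        }

        long maskToUnsignedInt(long value) {
            return value & INT_MASK;
        }

        long lowByte(long value) {
            return value & BYTE_MASK;
        }
    }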

*/ public class MurmurHash extends Hash { - private static MurmurHash _instance = new MurmurHash(); + private static final MurmurHash INSTANCE = new MurmurHash(); public static Hash getInstance() { - return _instance; + return INSTANCE; } @Override @@ -56,7 +56,7 @@ public int hash(byte[] data, int offset, int length, int seed) { k = k << 8; k = k | (data[i4 + 1] & 0xff); k = k << 8; - k = k | (data[i4 + 0] & 0xff); + k = k | (data[i4] & 0xff); k *= m; k ^= k >>> r; k *= m; @@ -77,7 +77,7 @@ public int hash(byte[] data, int offset, int length, int seed) { h ^= (int) data[length - 2] << 8; } if (left >= 1) { - h ^= (int) data[length - 1]; + h ^= data[length - 1]; } h *= m; diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/DisruptorMessageQueue.java b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/DisruptorMessageQueue.java index ae41c2a3a937..606c188986cd 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/DisruptorMessageQueue.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/DisruptorMessageQueue.java @@ -48,7 +48,7 @@ public class DisruptorMessageQueue implements HoodieMessageQueue { private final Disruptor queue; private final Function transformFunction; private final RingBuffer ringBuffer; - private AtomicReference throwable = new AtomicReference<>(null); + private final AtomicReference throwable = new AtomicReference<>(null); private boolean isShutdown = false; private boolean isStarted = false; diff --git a/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsPrometheusConfig.java b/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsPrometheusConfig.java index b0a019f81106..2340dd509169 100644 --- a/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsPrometheusConfig.java +++ b/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsPrometheusConfig.java @@ -182,7 +182,7 @@ public static HoodieMetricsPrometheusConfig.Builder newBuilder() { public static class Builder { - private HoodieMetricsPrometheusConfig hoodieMetricsPrometheusConfig = new HoodieMetricsPrometheusConfig(); + private final HoodieMetricsPrometheusConfig hoodieMetricsPrometheusConfig = new HoodieMetricsPrometheusConfig(); public Builder fromProperties(Properties props) { this.hoodieMetricsPrometheusConfig.getProps().putAll(props); diff --git a/hudi-common/src/main/java/org/apache/hudi/expression/Predicates.java b/hudi-common/src/main/java/org/apache/hudi/expression/Predicates.java index 11c4f39507f1..49c04de93a18 100644 --- a/hudi-common/src/main/java/org/apache/hudi/expression/Predicates.java +++ b/hudi-common/src/main/java/org/apache/hudi/expression/Predicates.java @@ -166,11 +166,7 @@ public Boolean eval(StructLike data) { if (right != null && !(Boolean) right) { return false; } else { - if (left != null && right != null) { - return true; - } else { - return false; - } + return left != null && right != null; } } } diff --git a/hudi-common/src/main/java/org/apache/hudi/index/secondary/HoodieSecondaryIndex.java b/hudi-common/src/main/java/org/apache/hudi/index/secondary/HoodieSecondaryIndex.java index 8e9a7e01a162..c75da2b74cca 100644 --- a/hudi-common/src/main/java/org/apache/hudi/index/secondary/HoodieSecondaryIndex.java +++ b/hudi-common/src/main/java/org/apache/hudi/index/secondary/HoodieSecondaryIndex.java @@ -80,7 +80,6 @@ private void validate() { } break; default: - return; } } diff --git 
a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java index 7978c21e54c6..7674caf971bc 100644 --- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java +++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java @@ -170,7 +170,6 @@ private void visitIdToField(Type type, Map index) { } return; default: - return; } } diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java index ed03a7349cb7..ac0768897584 100644 --- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java +++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java @@ -23,6 +23,7 @@ import java.io.Serializable; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -650,7 +651,7 @@ public Field field(int id) { @Override public List fields() { - return Arrays.asList(elementField); + return Collections.singletonList(elementField); } public int elementId() { diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/InternalSchemaChangeApplier.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/InternalSchemaChangeApplier.java index 36aac462a137..b4b404e03518 100644 --- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/InternalSchemaChangeApplier.java +++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/InternalSchemaChangeApplier.java @@ -28,7 +28,7 @@ * Manage schema change for HoodieWriteClient. */ public class InternalSchemaChangeApplier { - private InternalSchema latestSchema; + private final InternalSchema latestSchema; public InternalSchemaChangeApplier(InternalSchema latestSchema) { this.latestSchema = latestSchema; @@ -75,7 +75,7 @@ public InternalSchema applyAddChange( throw new IllegalArgumentException(String.format("only support first/before/after but found: %s", positionType)); } } else { - throw new IllegalArgumentException(String.format("positionType should be specified")); + throw new IllegalArgumentException("positionType should be specified"); } return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, add); } diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java index 35b3c781b4ee..e9932266b657 100644 --- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java +++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java @@ -42,9 +42,9 @@ public interface TableChange { */ enum ColumnChangeID { ADD, UPDATE, DELETE, PROPERTY_CHANGE, REPLACE; - private String name; + private final String name; - private ColumnChangeID() { + ColumnChangeID() { this.name = this.name().toLowerCase(Locale.ROOT); } diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java index 94e72ff7180e..c1fbc902f758 100644 --- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java +++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java @@ -90,7 +90,7 @@ public static InternalSchema 
pruneInternalSchemaByID(InternalSchema schema, List if (f != null) { newFields.add(f); } else { - throw new HoodieSchemaException(String.format("cannot find pruned id %s in currentSchema %s", id, schema.toString())); + throw new HoodieSchemaException(String.format("cannot find pruned id %s in currentSchema %s", id, schema)); } } } @@ -111,10 +111,8 @@ private static Type pruneType(Type type, List fieldIds) { Type newType = pruneType(f.type(), fieldIds); if (fieldIds.contains(f.fieldId())) { newTypes.add(f.type()); - } else if (newType != null) { - newTypes.add(newType); } else { - newTypes.add(null); + newTypes.add(newType); } } boolean changed = false; diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java index 7891fc4582cd..35545f16cdf3 100644 --- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java +++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java @@ -33,7 +33,7 @@ import java.io.IOException; import java.io.StringWriter; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -341,7 +341,7 @@ public static String inheritSchemas(InternalSchema newSchema, String oldSchemas) return ""; } if (oldSchemas == null || oldSchemas.isEmpty()) { - return toJson(Arrays.asList(newSchema)); + return toJson(Collections.singletonList(newSchema)); } String checkedString = "{\"schemas\":["; if (!oldSchemas.startsWith("{\"schemas\":")) { diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java index 27ea15bc2509..2da11bea079d 100644 --- a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java +++ b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java @@ -829,9 +829,9 @@ public static HoodieData convertMetadataToRecordIndexRecords(Hoodi return deletedRecordKeys.stream().map(recordKey -> HoodieMetadataPayload.createRecordIndexDelete(recordKey)).collect(toList()).iterator(); } // ignore log file data blocks. 
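The replacements just below, like the Types and SerDeHelper changes earlier, swap freshly allocated containers for the shared immutable ones in java.util.Collections; the observable behavior is unchanged but there is no per-call allocation and no raw-type warning. A hedged sketch of the pattern (the helper class and method names are made up):

    import java.util.Collections;
    import java.util.Iterator;
    import java.util.List;

    // Illustrative only, not Hudi code.
    final class RecordBatches {
        static Iterator<String> deletedKeysOrEmpty(List<String> deletedKeys) {
            if (deletedKeys.isEmpty()) {
                // Shared, allocation-free empty iterator instead of new ArrayList<>().iterator().
                return Collections.emptyIterator();
            }
            return deletedKeys.iterator();
        }

        static List<String> wrapSingleField(String field) {
            // A compact single-element list instead of Arrays.asList(field);
            // unlike Arrays.asList, the result is fully immutable (set() also throws).
            return Collections.singletonList(field);
        }
    }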
- return new ArrayList().iterator(); + return Collections.emptyIterator(); } else { - throw new HoodieIOException("Unsupported file type " + fullFilePath.toString() + " while generating MDT records"); + throw new HoodieIOException("Unsupported file type " + fullFilePath + " while generating MDT records"); } }); @@ -933,7 +933,7 @@ public static List getRecordKeysDeletedOrUpdated(HoodieEngineContext eng return getRecordKeys(fullFilePath.toString(), dataTableMetaClient, finalWriterSchemaOpt, maxBufferSize, instantTime, true, true) .iterator(); } else { - throw new HoodieIOException("Found unsupported file type " + fullFilePath.toString() + ", while generating MDT records"); + throw new HoodieIOException("Found unsupported file type " + fullFilePath + ", while generating MDT records"); } }).collectAsList(); } catch (Exception e) { @@ -2662,7 +2662,7 @@ public static HoodieMetadataColumnStats mergeColumnStatsRecords(HoodieMetadataCo Comparable minValue = (Comparable) Stream.of( - (Comparable) unwrapAvroValueWrapper(prevColumnStats.getMinValue()), + unwrapAvroValueWrapper(prevColumnStats.getMinValue()), (Comparable) unwrapAvroValueWrapper(newColumnStats.getMinValue())) .filter(Objects::nonNull) .min(Comparator.naturalOrder()) @@ -2670,7 +2670,7 @@ public static HoodieMetadataColumnStats mergeColumnStatsRecords(HoodieMetadataCo Comparable maxValue = (Comparable) Stream.of( - (Comparable) unwrapAvroValueWrapper(prevColumnStats.getMaxValue()), + unwrapAvroValueWrapper(prevColumnStats.getMaxValue()), (Comparable) unwrapAvroValueWrapper(newColumnStats.getMaxValue())) .filter(Objects::nonNull) .max(Comparator.naturalOrder()) diff --git a/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java b/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java index e3acab9a90b9..161536e19cc5 100644 --- a/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java +++ b/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java @@ -39,8 +39,8 @@ public class MetricsGraphiteReporter extends MetricsReporter { private final MetricRegistry registry; private final GraphiteReporter graphiteReporter; private final HoodieMetricsConfig metricsConfig; - private String serverHost; - private int serverPort; + private final String serverHost; + private final int serverPort; private final int periodSeconds; public MetricsGraphiteReporter(HoodieMetricsConfig metricsConfig, MetricRegistry registry) { diff --git a/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java b/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java index 13574b1e1569..6922393f39aa 100644 --- a/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java +++ b/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java @@ -28,8 +28,8 @@ * Extensible metrics reporter for custom implementation. 
*/ public abstract class CustomizableMetricsReporter extends MetricsReporter { - private Properties props; - private MetricRegistry registry; + private final Properties props; + private final MetricRegistry registry; public CustomizableMetricsReporter(Properties props, MetricRegistry registry) { this.props = props; diff --git a/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java b/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java index 32b0ee809f93..1f3e2a4ca4a8 100644 --- a/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java +++ b/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java @@ -173,6 +173,6 @@ String build() { } enum MetricType { - gauge; + gauge } } diff --git a/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/QuickstartConfigurations.java b/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/QuickstartConfigurations.java index 47a5009ade59..ca75d8db1171 100644 --- a/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/QuickstartConfigurations.java +++ b/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/QuickstartConfigurations.java @@ -113,12 +113,11 @@ public static String getCreateHoodieTableDDL( } public static String getCreateHudiCatalogDDL(final String catalogName, final String catalogPath) { - StringBuilder builder = new StringBuilder(); - builder.append("create catalog ").append(catalogName).append(" with (\n"); - builder.append(" 'type' = 'hudi',\n" - + " 'catalog.path' = '").append(catalogPath).append("'"); - builder.append("\n)"); - return builder.toString(); + return "create catalog " + catalogName + " with (\n" + + " 'type' = 'hudi',\n" + + " 'catalog.path' = '" + + catalogPath + "'" + + "\n)"; } public static String getFileSourceDDL(String tableName) { @@ -175,8 +174,7 @@ public static String getCollectSinkDDL(String tableName, ResolvedSchema tableSch } builder.append("\n"); } - final String withProps = "" - + ") with (\n" + final String withProps = ") with (\n" + " 'connector' = '" + CollectSinkTableFactory.FACTORY_ID + "'\n" + ")"; builder.append(withProps); diff --git a/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/SchemaBuilder.java b/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/SchemaBuilder.java index 76306f780646..ae814acea1f5 100644 --- a/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/SchemaBuilder.java +++ b/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/SchemaBuilder.java @@ -34,8 +34,8 @@ * Builder for {@link ResolvedSchema}. 
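The QuickstartConfigurations changes above (and the earlier Pair/Triple toString rewrites) drop hand-rolled StringBuilder chains in favor of plain concatenation; for a single expression javac generates the equivalent StringBuilder or invokedynamic concatenation anyway, so this is purely a readability cleanup. A small sketch with hypothetical parameter names:

    // Illustrative only; catalogName and catalogPath are made-up parameters.
    final class DdlSnippets {
        static String createCatalogDdl(String catalogName, String catalogPath) {
            // One concatenation expression; the compiler produces the builder code itself.
            return "create catalog " + catalogName + " with (\n"
                + "  'type' = 'hudi',\n"
                + "  'catalog.path' = '" + catalogPath + "'\n"
                + ")";
        }

        public static void main(String[] args) {
            System.out.println(createCatalogDdl("hudi_catalog", "/tmp/hudi"));
        }
    }

An explicit StringBuilder still makes sense when appending inside a loop; the rewritten methods build a single expression, so nothing is lost.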
*/ public class SchemaBuilder { - private List columns; - private List watermarkSpecs; + private final List columns; + private final List watermarkSpecs; private UniqueConstraint constraint; public static SchemaBuilder instance() { diff --git a/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieSparkBootstrapExample.java b/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieSparkBootstrapExample.java index f1529b6e03a2..31f93601f927 100644 --- a/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieSparkBootstrapExample.java +++ b/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieSparkBootstrapExample.java @@ -34,7 +34,7 @@ public class HoodieSparkBootstrapExample { - private static String tableType = HoodieTableType.MERGE_ON_READ.name(); + private static final String TABLE_TYPE = HoodieTableType.MERGE_ON_READ.name(); public static void main(String[] args) throws Exception { if (args.length < 5) { diff --git a/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieWriteClientExample.java b/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieWriteClientExample.java index b8df6161ca45..457020036e7c 100644 --- a/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieWriteClientExample.java +++ b/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieWriteClientExample.java @@ -66,7 +66,7 @@ public class HoodieWriteClientExample { private static final Logger LOG = LoggerFactory.getLogger(HoodieWriteClientExample.class); - private static String tableType = HoodieTableType.COPY_ON_WRITE.name(); + private static final String TABLE_TYPE = HoodieTableType.COPY_ON_WRITE.name(); public static void main(String[] args) throws Exception { if (args.length < 2) { @@ -87,7 +87,7 @@ public static void main(String[] args) throws Exception { FileSystem fs = HadoopFSUtils.getFs(tablePath, jsc.hadoopConfiguration()); if (!fs.exists(path)) { HoodieTableMetaClient.newTableBuilder() - .setTableType(tableType) + .setTableType(TABLE_TYPE) .setTableName(tableName) .setPayloadClass(HoodieAvroPayload.class) .initTable(HadoopFSUtils.getStorageConfWithCopy(jsc.hadoopConfiguration()), tablePath); @@ -139,7 +139,7 @@ public static void main(String[] args) throws Exception { client.deletePartitions(deleteList, newCommitTime); // compaction - if (HoodieTableType.valueOf(tableType) == HoodieTableType.MERGE_ON_READ) { + if (HoodieTableType.valueOf(TABLE_TYPE) == HoodieTableType.MERGE_ON_READ) { Option instant = client.scheduleCompaction(Option.empty()); HoodieWriteMetadata> compactionMetadata = client.compact(instant.get()); client.commitCompaction(instant.get(), compactionMetadata.getCommitMetadata().get(), Option.empty()); diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/bulk/sort/SortOperator.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/bulk/sort/SortOperator.java index 5a898760a282..e223c3bf60b6 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/bulk/sort/SortOperator.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/bulk/sort/SortOperator.java @@ -54,7 +54,7 @@ public class SortOperator extends TableStreamOperator private GeneratedNormalizedKeyComputer gComputer; private GeneratedRecordComparator gComparator; - private Configuration conf; + private 
final Configuration conf; private transient BinaryExternalSorter sorter; private transient StreamRecordCollector collector; diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/clustering/update/strategy/FlinkConsistentBucketUpdateStrategy.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/clustering/update/strategy/FlinkConsistentBucketUpdateStrategy.java index 73c0d655fb72..e900af3c2500 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/clustering/update/strategy/FlinkConsistentBucketUpdateStrategy.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/clustering/update/strategy/FlinkConsistentBucketUpdateStrategy.java @@ -57,8 +57,8 @@ public class FlinkConsistentBucketUpdateStrategy private static final Logger LOG = LoggerFactory.getLogger(FlinkConsistentBucketUpdateStrategy.class); private boolean initialized = false; - private List indexKeyFields; - private Map> partitionToIdentifier; + private final List indexKeyFields; + private final Map> partitionToIdentifier; private String lastRefreshInstant = HoodieTimeline.INIT_INSTANT_TS; public FlinkConsistentBucketUpdateStrategy(HoodieFlinkWriteClient writeClient, List indexKeyFields) { diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/transform/ChainedTransformer.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/transform/ChainedTransformer.java index 2fe2867b7546..bb59f9961ab9 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/transform/ChainedTransformer.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/transform/ChainedTransformer.java @@ -29,7 +29,7 @@ */ public class ChainedTransformer implements Transformer { - private List transformers; + private final List transformers; public ChainedTransformer(List transformers) { this.transformers = transformers; diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/ExpressionEvaluators.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/ExpressionEvaluators.java index 0feda05a4c77..9c6ec425ed51 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/ExpressionEvaluators.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/ExpressionEvaluators.java @@ -571,9 +571,9 @@ private static BigDecimal getBigDecimal(@NotNull Object value) { } else if (value instanceof Double) { // new BigDecimal() are used instead of BigDecimal.valueOf() due to // receive exact decimal representation of the double's binary floating-point value - return new BigDecimal((Double) value); + return BigDecimal.valueOf((Double) value); } else if (value instanceof Float) { - return new BigDecimal(((Float) value).doubleValue()); + return BigDecimal.valueOf(((Float) value).doubleValue()); } else if (value instanceof Long) { return new BigDecimal((Long) value); } else if (value instanceof Integer) { diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java index 90f81289bd2b..5c9cde00ba90 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java @@ -330,14 +330,10 @@ private String getSourceOperatorName(String operatorName) { 
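The ExpressionEvaluators change above switches double and float conversion to BigDecimal.valueOf. The difference is observable: new BigDecimal(double) preserves the exact binary value of the double, while BigDecimal.valueOf goes through Double.toString and yields the short canonical form. A quick illustration (standard JDK behavior, not Hudi code):

    import java.math.BigDecimal;

    // Demonstrates the two double-to-BigDecimal conversions.
    class DecimalConversionDemo {
        public static void main(String[] args) {
            // Exact binary expansion of the double closest to 0.1:
            // 0.1000000000000000055511151231257827021181583404541015625
            System.out.println(new BigDecimal(0.1));

            // Canonical short representation via Double.toString: 0.1
            System.out.println(BigDecimal.valueOf(0.1));
        }
    }

For comparing against user-supplied literals the canonical form generally matches expectations better, which appears to be the intent of the change.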
List fields = Arrays.stream(this.requiredPos) .mapToObj(i -> schemaFieldNames[i]) .collect(Collectors.toList()); - StringBuilder sb = new StringBuilder(); - sb.append(operatorName) - .append("(") - .append("table=").append(Collections.singletonList(conf.getString(FlinkOptions.TABLE_NAME))) - .append(", ") - .append("fields=").append(fields) - .append(")"); - return sb.toString(); + return operatorName + "(" + + "table=" + Collections.singletonList(conf.get(FlinkOptions.TABLE_NAME)) + + ", " + "fields=" + fields + + ")"; } @Nullable diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/format/FormatUtils.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/format/FormatUtils.java index 0628673b8e58..80c33b76cf3c 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/format/FormatUtils.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/format/FormatUtils.java @@ -310,8 +310,7 @@ public static Option getRawValueWithAltKeys(org.apache.flink.configurati public static boolean getBooleanWithAltKeys(org.apache.flink.configuration.Configuration conf, ConfigProperty configProperty) { Option rawValue = getRawValueWithAltKeys(conf, configProperty); - boolean defaultValue = configProperty.hasDefaultValue() - ? Boolean.parseBoolean(configProperty.defaultValue().toString()) : false; + boolean defaultValue = configProperty.hasDefaultValue() && Boolean.parseBoolean(configProperty.defaultValue().toString()); return rawValue.map(Boolean::parseBoolean).orElse(defaultValue); } diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/lookup/HoodieLookupFunction.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/lookup/HoodieLookupFunction.java index a43bf1189fb3..a7f6ec2a2bd9 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/lookup/HoodieLookupFunction.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/lookup/HoodieLookupFunction.java @@ -138,7 +138,7 @@ private void checkCacheReload() { return; } // Determine whether to reload data by comparing instant - if (currentCommit != null && latestCommitInstant.get().equals(currentCommit)) { + if (latestCommitInstant.get().equals(currentCommit)) { LOG.info("Ignore loading data because the commit instant " + currentCommit + " has not changed."); return; } diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/FlinkWriteClients.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/FlinkWriteClients.java index e9d0310d4756..1984a7c7baea 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/FlinkWriteClients.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/FlinkWriteClients.java @@ -208,7 +208,7 @@ public static HoodieWriteConfig getHoodieClientConfig( ).build()) .forTable(conf.getString(FlinkOptions.TABLE_NAME)) .withStorageConfig(HoodieStorageConfig.newBuilder() - .logFileDataBlockMaxSize(conf.getInteger(FlinkOptions.WRITE_LOG_BLOCK_SIZE) * 1024 * 1024) + .logFileDataBlockMaxSize((long) conf.getInteger(FlinkOptions.WRITE_LOG_BLOCK_SIZE) * 1024 * 1024) .logFileMaxSize(conf.getLong(FlinkOptions.WRITE_LOG_MAX_SIZE) * 1024 * 1024) .parquetBlockSize(conf.getInteger(FlinkOptions.WRITE_PARQUET_BLOCK_SIZE) * 1024 * 1024) .parquetPageSize(conf.getInteger(FlinkOptions.WRITE_PARQUET_PAGE_SIZE) * 1024 * 1024) diff --git 
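The FlinkWriteClients change just above widens the log-block-size setting to long before multiplying. Without the cast the multiplication runs in 32-bit int arithmetic and can silently overflow for large configured values; casting one operand first promotes the whole expression to long. A sketch with a made-up config value:

    // Shows why the (long) cast matters.
    class BlockSizeMath {
        public static void main(String[] args) {
            int configuredMb = 4096; // hypothetical 4 GiB setting, in MB

            long wrong = configuredMb * 1024 * 1024;        // int overflow happens before the widening assignment
            long right = (long) configuredMb * 1024 * 1024; // promoted to long, so no overflow

            System.out.println(wrong); // 0  (4096 * 1024 * 1024 == 2^32, which wraps to 0 in int)
            System.out.println(right); // 4294967296
        }
    }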
a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/RowDataToAvroConverters.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/RowDataToAvroConverters.java index 57463d913ccf..f54abd4a16bb 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/RowDataToAvroConverters.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/RowDataToAvroConverters.java @@ -50,7 +50,7 @@ @Internal public class RowDataToAvroConverters { - private static Conversions.DecimalConversion decimalConversion = new Conversions.DecimalConversion(); + private static final Conversions.DecimalConversion DECIMAL_CONVERSION = new Conversions.DecimalConversion(); // -------------------------------------------------------------------------------- // Runtime Converters @@ -219,7 +219,7 @@ public Object convert(Schema schema, Object object) { @Override public Object convert(Schema schema, Object object) { BigDecimal javaDecimal = ((DecimalData) object).toBigDecimal(); - return decimalConversion.toFixed(javaDecimal, schema, schema.getLogicalType()); + return DECIMAL_CONVERSION.toFixed(javaDecimal, schema, schema.getLogicalType()); } }; break; diff --git a/hudi-flink-datasource/hudi-flink1.18.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java b/hudi-flink-datasource/hudi-flink1.18.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java index 95d8fd720d30..f828ae9dffa7 100644 --- a/hudi-flink-datasource/hudi-flink1.18.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java +++ b/hudi-flink-datasource/hudi-flink1.18.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java @@ -29,8 +29,8 @@ public class HeapMapColumnVector extends AbstractHeapVector implements WritableColumnVector, MapColumnVector { - private WritableColumnVector keys; - private WritableColumnVector values; + private final WritableColumnVector keys; + private final WritableColumnVector values; public HeapMapColumnVector(int len, WritableColumnVector keys, WritableColumnVector values) { super(len); diff --git a/hudi-flink-datasource/hudi-flink1.19.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java b/hudi-flink-datasource/hudi-flink1.19.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java index c01b8392893a..a684f64adfb6 100644 --- a/hudi-flink-datasource/hudi-flink1.19.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java +++ b/hudi-flink-datasource/hudi-flink1.19.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java @@ -29,8 +29,8 @@ public class HeapMapColumnVector extends AbstractHeapVector implements WritableColumnVector, MapColumnVector { - private WritableColumnVector keys; - private WritableColumnVector values; + private final WritableColumnVector keys; + private final WritableColumnVector values; public HeapMapColumnVector(int len, WritableColumnVector keys, WritableColumnVector values) { super(len); diff --git a/hudi-flink-datasource/hudi-flink1.20.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java b/hudi-flink-datasource/hudi-flink1.20.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java index 95d8fd720d30..f828ae9dffa7 100644 --- a/hudi-flink-datasource/hudi-flink1.20.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java +++ 
b/hudi-flink-datasource/hudi-flink1.20.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java @@ -29,8 +29,8 @@ public class HeapMapColumnVector extends AbstractHeapVector implements WritableColumnVector, MapColumnVector { - private WritableColumnVector keys; - private WritableColumnVector values; + private final WritableColumnVector keys; + private final WritableColumnVector values; public HeapMapColumnVector(int len, WritableColumnVector keys, WritableColumnVector values) { super(len); diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java index a1bae5deb7bf..27cb63d58222 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java @@ -73,7 +73,7 @@ public class DFSPropertiesConfiguration extends PropertiesConfig { @Nullable private final Configuration hadoopConfig; - private StoragePath mainFilePath; + private final StoragePath mainFilePath; // props read from user defined configuration file or input stream private final HoodieConfig hoodieConfig; diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/AvroOrcUtils.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/AvroOrcUtils.java index 295e5163ed52..d05a07a74a45 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/AvroOrcUtils.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/AvroOrcUtils.java @@ -147,13 +147,13 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch final Utf8 utf8 = (Utf8) value; bytes = utf8.getBytes(); } else if (value instanceof GenericData.EnumSymbol) { - bytes = getUTF8Bytes(((GenericData.EnumSymbol) value).toString()); + bytes = getUTF8Bytes(value.toString()); } else { throw new IllegalStateException(String.format( "Unrecognized type for Avro %s field value, which has type %s, value %s", type.getCategory().getName(), value.getClass().getName(), - value.toString() + value )); } @@ -177,7 +177,7 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch throw new IllegalStateException(String.format( "Unrecognized type for Avro DATE field value, which has type %s, value %s", value.getClass().getName(), - value.toString() + value )); } dateColVec.vector[vectorPos] = daysSinceEpoch; @@ -209,7 +209,7 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch throw new IllegalStateException(String.format( "Unrecognized type for Avro TIMESTAMP field value, which has type %s, value %s", value.getClass().getName(), - value.toString() + value )); } @@ -231,7 +231,7 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch throw new IllegalStateException(String.format( "Unrecognized type for Avro BINARY field value, which has type %s, value %s", value.getClass().getName(), - value.toString() + value )); } binaryColVec.setRef(vectorPos, binaryBytes, 0, binaryBytes.length); @@ -338,13 +338,13 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch throw new IllegalStateException(String.format( "Failed to add value %s to union with type %s", value == null ? 
"null" : value.toString(), - type.toString() + type )); } break; default: - throw new IllegalArgumentException("Invalid TypeDescription " + type.toString() + "."); + throw new IllegalArgumentException("Invalid TypeDescription " + type + "."); } } @@ -598,7 +598,7 @@ public static Object readFromVector(TypeDescription type, ColumnVector colVector ColumnVector fieldVector = unionVector.fields[tag]; return readFromVector(type.getChildren().get(tag), fieldVector, avroSchema.getTypes().get(tag), vectorPos); default: - throw new HoodieIOException("Unrecognized TypeDescription " + type.toString()); + throw new HoodieIOException("Unrecognized TypeDescription " + type); } } @@ -811,7 +811,7 @@ public static Schema createAvroSchemaWithDefaultValue(TypeDescription orcSchema, if (nullable) { fields.add(new Schema.Field(field.name(), nullableSchema, null, NULL_VALUE)); } else { - fields.add(new Schema.Field(field.name(), fieldSchema, null, (Object) null)); + fields.add(new Schema.Field(field.name(), fieldSchema, null, null)); } } Schema schema = Schema.createRecord(recordName, null, null, false); diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java index 9f1347872e2c..f4bac6284c9c 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java @@ -84,8 +84,7 @@ public static Option getRawValueWithAltKeys(Configuration conf, public static boolean getBooleanWithAltKeys(Configuration conf, ConfigProperty configProperty) { Option rawValue = getRawValueWithAltKeys(conf, configProperty); - boolean defaultValue = configProperty.hasDefaultValue() - ? Boolean.parseBoolean(configProperty.defaultValue().toString()) : false; + boolean defaultValue = configProperty.hasDefaultValue() && Boolean.parseBoolean(configProperty.defaultValue().toString()); return rawValue.map(Boolean::parseBoolean).orElse(defaultValue); } } diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/BoundedFsDataInputStream.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/BoundedFsDataInputStream.java index 68a28ab6989c..fb76a4a7d5ae 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/BoundedFsDataInputStream.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/BoundedFsDataInputStream.java @@ -31,8 +31,8 @@ * Implementation of {@link FSDataInputStream} with bound check based on file size. 
*/ public class BoundedFsDataInputStream extends FSDataInputStream { - private FileSystem fs; - private Path file; + private final FileSystem fs; + private final Path file; private long fileLen = -1L; public BoundedFsDataInputStream(FileSystem fs, Path file, InputStream in) { diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieRetryWrapperFileSystem.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieRetryWrapperFileSystem.java index b31d2a8ca08a..c9d8fff3fbbd 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieRetryWrapperFileSystem.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieRetryWrapperFileSystem.java @@ -36,7 +36,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Progressable; -import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.util.EnumSet; @@ -46,11 +45,11 @@ */ public class HoodieRetryWrapperFileSystem extends FileSystem { - private FileSystem fileSystem; - private long maxRetryIntervalMs; - private int maxRetryNumbers; - private long initialRetryIntervalMs; - private String retryExceptionsList; + private final FileSystem fileSystem; + private final long maxRetryIntervalMs; + private final int maxRetryNumbers; + private final long initialRetryIntervalMs; + private final String retryExceptionsList; public HoodieRetryWrapperFileSystem(FileSystem fs, long maxRetryIntervalMs, int maxRetryNumbers, long initialRetryIntervalMs, String retryExceptions) { this.fileSystem = fs; @@ -203,7 +202,7 @@ public boolean delete(Path f) throws IOException { } @Override - public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException { + public FileStatus[] listStatus(Path f) throws IOException { return new RetryHelper(maxRetryIntervalMs, maxRetryNumbers, initialRetryIntervalMs, retryExceptionsList).tryWith(() -> fileSystem.listStatus(f)).start(); } diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java index 64b827d2613e..8a7b5f0d974f 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java @@ -37,17 +37,17 @@ */ public class HoodieSerializableFileStatus implements Serializable { - private Path path; - private long length; - private Boolean isDir; - private short blockReplication; - private long blockSize; - private long modificationTime; - private long accessTime; - private FsPermission permission; - private String owner; - private String group; - private Path symlink; + private final Path path; + private final long length; + private final Boolean isDir; + private final short blockReplication; + private final long blockSize; + private final long modificationTime; + private final long accessTime; + private final FsPermission permission; + private final String owner; + private final String group; + private final Path symlink; HoodieSerializableFileStatus(Path path, long length, boolean isDir, short blockReplication, long blockSize, long modificationTime, long accessTime, diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java index 5d00a14fadbb..276b215fd18e 100644 --- 
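The listStatus signature cleanups in this stretch of the diff work because java.io.FileNotFoundException is a subclass of IOException, so listing both in the throws clause is redundant. A minimal demonstration:

    import java.io.FileNotFoundException;
    import java.io.IOException;

    // A throws IOException clause already covers FileNotFoundException.
    class ThrowsDemo {
        static void open(String path) throws IOException {
            // Throwing the subclass is allowed under the broader declared exception.
            throw new FileNotFoundException(path);
        }

        public static void main(String[] args) {
            try {
                open("/missing/file");
            } catch (FileNotFoundException e) {
                // Callers can still catch the narrower type if they care about it specifically.
                System.out.println("not found: " + e.getMessage());
            } catch (IOException e) {
                System.out.println("io error: " + e.getMessage());
            }
        }
    }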
a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java @@ -93,7 +93,7 @@ public static void setMetricsRegistry(Registry registry, Registry registryMeta) } - private ConcurrentMap openStreams = new ConcurrentHashMap<>(); + private final ConcurrentMap openStreams = new ConcurrentHashMap<>(); private FileSystem fileSystem; private URI uri; private ConsistencyGuard consistencyGuard = new NoOpConsistencyGuard(); @@ -1008,7 +1008,7 @@ public long getBytesWritten(Path file) { } // When the file is first written, we do not have a track of it throw new IllegalArgumentException( - file.toString() + " does not have a open stream. Cannot get the bytes written on the stream"); + file + " does not have a open stream. Cannot get the bytes written on the stream"); } public FileSystem getFileSystem() { diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java index 7831e76c88fc..572e593f6d89 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java @@ -29,7 +29,6 @@ import org.apache.hadoop.util.Progressable; import java.io.ByteArrayOutputStream; -import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -102,7 +101,7 @@ public boolean delete(Path path, boolean b) throws IOException { } @Override - public FileStatus[] listStatus(Path inlinePath) throws FileNotFoundException, IOException { + public FileStatus[] listStatus(Path inlinePath) throws IOException { throw new UnsupportedOperationException("No support for listStatus"); } diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroHFileWriter.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroHFileWriter.java index 3fc95818bf27..6967f19246da 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroHFileWriter.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroHFileWriter.java @@ -64,7 +64,7 @@ */ public class HoodieAvroHFileWriter implements HoodieAvroFileWriter { - private static AtomicLong recordIndex = new AtomicLong(1); + private static final AtomicLong RECORD_INDEX_COUNT = new AtomicLong(1); private final Path file; private final HoodieHFileConfig hfileConfig; private final boolean isWrapperFileSystem; @@ -127,7 +127,7 @@ public HoodieAvroHFileWriter(String instantTime, StoragePath file, HoodieHFileCo public void writeAvroWithMetadata(HoodieKey key, IndexedRecord avroRecord) throws IOException { if (populateMetaFields) { prepRecordWithMetadata(key, avroRecord, instantTime, - taskContextSupplier.getPartitionIdSupplier().get(), recordIndex.getAndIncrement(), file.getName()); + taskContextSupplier.getPartitionIdSupplier().get(), RECORD_INDEX_COUNT.getAndIncrement(), file.getName()); writeAvro(key.getRecordKey(), avroRecord); } else { writeAvro(key.getRecordKey(), avroRecord); diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroOrcWriter.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroOrcWriter.java index 0516caad9ee5..e3ffc69c1a84 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroOrcWriter.java 
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroOrcWriter.java @@ -68,7 +68,7 @@ public class HoodieAvroOrcWriter implements HoodieAvroFileWriter, Closeable { private final String instantTime; private final TaskContextSupplier taskContextSupplier; - private HoodieOrcConfig orcConfig; + private final HoodieOrcConfig orcConfig; private String minRecordKey; private String maxRecordKey; diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java index 8f17fa0fa1e1..ed20f0bca114 100644 --- a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java +++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java @@ -100,7 +100,7 @@ protected void handleParquetBloomFilters(ParquetWriter.Builder parquetWriterbuil hadoopConf.forEach(conf -> { String key = conf.getKey(); if (key.startsWith(BLOOM_FILTER_ENABLED)) { - String column = key.substring(BLOOM_FILTER_ENABLED.length() + 1, key.length()); + String column = key.substring(BLOOM_FILTER_ENABLED.length() + 1); try { Method method = parquetWriterbuilder.getClass().getMethod("withBloomFilterEnabled", String.class, boolean.class); method.invoke(parquetWriterbuilder, column, Boolean.valueOf(conf.getValue()).booleanValue()); @@ -109,7 +109,7 @@ protected void handleParquetBloomFilters(ParquetWriter.Builder parquetWriterbuil } } if (key.startsWith(BLOOM_FILTER_EXPECTED_NDV)) { - String column = key.substring(BLOOM_FILTER_EXPECTED_NDV.length() + 1, key.length()); + String column = key.substring(BLOOM_FILTER_EXPECTED_NDV.length() + 1); try { Method method = parquetWriterbuilder.getClass().getMethod("withBloomFilterNDV", String.class, long.class); method.invoke(parquetWriterbuilder, column, Long.valueOf(conf.getValue()).longValue()); diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieHFileRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieHFileRecordReader.java index 3414f5fdea88..5b32dd435a8b 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieHFileRecordReader.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieHFileRecordReader.java @@ -50,10 +50,10 @@ public class HoodieHFileRecordReader implements RecordReader { private long count = 0; - private ArrayWritable valueObj; + private final ArrayWritable valueObj; private HoodieFileReader reader; private ClosableIterator> recordIterator; - private Schema schema; + private final Schema schema; public HoodieHFileRecordReader(Configuration conf, InputSplit split, JobConf job) throws IOException { FileSplit fileSplit = (FileSplit) split; diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java index c301130a8401..3b6b3940489d 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java @@ -74,7 +74,7 @@ public class HoodieROTablePathFilter implements Configurable, PathFilter, Serial * Its quite common, to have all files from a given partition path be passed into accept(), cache the check for hoodie * metadata for known partition paths and the latest versions of files. 
*/ - private Map> hoodiePathCache; + private final Map> hoodiePathCache; /** * Paths that are known to be non-hoodie tables. @@ -214,7 +214,7 @@ public boolean accept(Path path) { if (!hoodiePathCache.containsKey(folder.toString())) { hoodiePathCache.put(folder.toString(), new HashSet<>()); } - LOG.info("Based on hoodie metadata from base path: " + baseDir.toString() + ", caching " + latestFiles.size() + LOG.info("Based on hoodie metadata from base path: " + baseDir + ", caching " + latestFiles.size() + " files under " + folder); for (HoodieBaseFile lfile : latestFiles) { hoodiePathCache.get(folder.toString()).add(new Path(lfile.getPath())); @@ -229,7 +229,7 @@ public boolean accept(Path path) { } catch (TableNotFoundException e) { // Non-hoodie path, accept it. if (LOG.isDebugEnabled()) { - LOG.debug(String.format("(1) Caching non-hoodie path under %s with basePath %s \n", folder.toString(), baseDir.toString())); + LOG.debug(String.format("(1) Caching non-hoodie path under %s with basePath %s \n", folder, baseDir)); } nonHoodiePathCache.add(folder.toString()); nonHoodiePathCache.add(baseDir.toString()); @@ -242,7 +242,7 @@ public boolean accept(Path path) { } else { // files is at < 3 level depth in FS tree, can't be hoodie dataset if (LOG.isDebugEnabled()) { - LOG.debug(String.format("(2) Caching non-hoodie path under %s \n", folder.toString())); + LOG.debug(String.format("(2) Caching non-hoodie path under %s \n", folder)); } nonHoodiePathCache.add(folder.toString()); return true; diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/InputPathHandler.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/InputPathHandler.java index f0c2e6a1fe2f..88e96d29e1be 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/InputPathHandler.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/InputPathHandler.java @@ -60,7 +60,7 @@ public class InputPathHandler { private final Map> groupedIncrementalPaths; private final List snapshotPaths; private final List nonHoodieInputPaths; - private boolean isIncrementalUseDatabase; + private final boolean isIncrementalUseDatabase; public InputPathHandler(Configuration conf, Path[] inputPaths, List incrementalTables) throws IOException { this.conf = conf; diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/avro/HoodieAvroParquetReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/avro/HoodieAvroParquetReader.java index c31041ddc76b..b5a8e0138826 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/avro/HoodieAvroParquetReader.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/avro/HoodieAvroParquetReader.java @@ -48,7 +48,7 @@ public class HoodieAvroParquetReader extends RecordReader { private final ParquetRecordReader parquetRecordReader; - private Schema baseSchema; + private final Schema baseSchema; public HoodieAvroParquetReader(InputSplit inputSplit, Configuration conf) throws IOException { // get base schema diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java index 99c9ecd210e4..445531b3f2f5 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java @@ -759,11 +759,9 @@ public String[] getLocations() throws IOException { */ @Override public String toString() { - StringBuilder sb = new 
StringBuilder(); - sb.append(inputSplitShim.toString()); - sb.append("InputFormatClass: " + inputFormatClassName); - sb.append("\n"); - return sb.toString(); + return inputSplitShim.toString() + + "InputFormatClass: " + inputFormatClassName + + "\n"; } /** @@ -829,8 +827,7 @@ public boolean equals(Object o) { if (o instanceof CombinePathInputFormat) { CombinePathInputFormat mObj = (CombinePathInputFormat) o; return (opList.equals(mObj.opList)) && (inputFormatClassName.equals(mObj.inputFormatClassName)) - && (deserializerClassName == null ? (mObj.deserializerClassName == null) - : deserializerClassName.equals(mObj.deserializerClassName)); + && (Objects.equals(deserializerClassName, mObj.deserializerClassName)); } return false; } diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineRealtimeFileSplit.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineRealtimeFileSplit.java index a30aa178f123..5636339b4d22 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineRealtimeFileSplit.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineRealtimeFileSplit.java @@ -51,11 +51,11 @@ public HoodieCombineRealtimeFileSplit() { public HoodieCombineRealtimeFileSplit(JobConf jobConf, List realtimeFileSplits) { super(jobConf, realtimeFileSplits.stream().map(p -> - ((HoodieRealtimeFileSplit) p).getPath()).collect(Collectors.toList()).toArray(new + p.getPath()).collect(Collectors.toList()).toArray(new Path[realtimeFileSplits.size()]), - ArrayUtils.toPrimitive(realtimeFileSplits.stream().map(p -> ((HoodieRealtimeFileSplit) p).getStart()) + ArrayUtils.toPrimitive(realtimeFileSplits.stream().map(p -> p.getStart()) .collect(Collectors.toList()).toArray(new Long[realtimeFileSplits.size()])), - ArrayUtils.toPrimitive(realtimeFileSplits.stream().map(p -> ((HoodieRealtimeFileSplit) p).getLength()) + ArrayUtils.toPrimitive(realtimeFileSplits.stream().map(p -> p.getLength()) .collect(Collectors.toList()).toArray(new Long[realtimeFileSplits.size()])), realtimeFileSplits.stream().map(p -> { try { diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java index a0df73c34e21..3b36974a0c95 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java @@ -76,7 +76,7 @@ public abstract class AbstractRealtimeRecordReader { protected boolean supportPayload; // handle hive type to avro record protected HiveAvroSerializer serializer; - private boolean supportTimestamp; + private final boolean supportTimestamp; public AbstractRealtimeRecordReader(RealtimeSplit split, JobConf job) { this.split = split; diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieCombineRealtimeRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieCombineRealtimeRecordReader.java index a663bbc6ba73..84d8868f5f3b 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieCombineRealtimeRecordReader.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieCombineRealtimeRecordReader.java @@ -44,9 +44,9 @@ */ public class HoodieCombineRealtimeRecordReader implements RecordReader { - private static final transient Logger LOG = 
LoggerFactory.getLogger(HoodieCombineRealtimeRecordReader.class); + private static final Logger LOG = LoggerFactory.getLogger(HoodieCombineRealtimeRecordReader.class); // RecordReaders for each split - private List recordReaders = new LinkedList<>(); + private final List recordReaders = new LinkedList<>(); // Points to the currently iterating record reader private RecordReader currentRecordReader; diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieHiveUtils.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieHiveUtils.java index b4894c35d418..981ab9ce54b3 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieHiveUtils.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieHiveUtils.java @@ -139,7 +139,7 @@ public static List getIncrementalTableNames(JobContext job) { Map tablesModeMap = job.getConfiguration() .getValByRegex(HOODIE_CONSUME_MODE_PATTERN_STRING.pattern()); List result = tablesModeMap.entrySet().stream().map(s -> { - if (s.getValue().trim().toUpperCase().equals(INCREMENTAL_SCAN_MODE)) { + if (s.getValue().trim().equalsIgnoreCase(INCREMENTAL_SCAN_MODE)) { Matcher matcher = HOODIE_CONSUME_MODE_PATTERN_STRING.matcher(s.getKey()); return (!matcher.find() ? null : matcher.group(1)); } diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieInputFormatUtils.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieInputFormatUtils.java index aa24bd36fd92..be7c363f6196 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieInputFormatUtils.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieInputFormatUtils.java @@ -27,8 +27,8 @@ import org.apache.hudi.common.model.HoodieFileFormat; import org.apache.hudi.common.model.HoodiePartitionMetadata; import org.apache.hudi.common.table.HoodieTableMetaClient; -import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.HoodieTimeline; import org.apache.hudi.common.table.timeline.TimelineUtils.HollowCommitHandling; import org.apache.hudi.common.table.view.HoodieTableFileSystemView; import org.apache.hudi.common.table.view.TableFileSystemView; @@ -218,12 +218,12 @@ public static FileInputFormat getInputFormat(String path, boolean realtime, Conf * @return */ public static HoodieTimeline filterInstantsTimeline(HoodieTimeline timeline) { - HoodieTimeline commitsAndCompactionTimeline = (HoodieTimeline)timeline.getWriteTimeline(); + HoodieTimeline commitsAndCompactionTimeline = timeline.getWriteTimeline(); Option pendingCompactionInstant = commitsAndCompactionTimeline .filterPendingCompactionTimeline().firstInstant(); if (pendingCompactionInstant.isPresent()) { - HoodieTimeline instantsTimeline = (HoodieTimeline)(commitsAndCompactionTimeline - .findInstantsBefore(pendingCompactionInstant.get().requestedTime())); + HoodieTimeline instantsTimeline = commitsAndCompactionTimeline + .findInstantsBefore(pendingCompactionInstant.get().requestedTime()); int numCommitsFilteredByCompaction = commitsAndCompactionTimeline.getCommitsTimeline().countInstants() - instantsTimeline.getCommitsTimeline().countInstants(); LOG.info("Earliest pending compaction instant is: " + pendingCompactionInstant.get().requestedTime() diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieRealtimeInputFormatUtils.java 
b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieRealtimeInputFormatUtils.java index b8308011fd88..fb92c70fd4a4 100644 --- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieRealtimeInputFormatUtils.java +++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieRealtimeInputFormatUtils.java @@ -119,8 +119,7 @@ public static boolean requiredProjectionFieldsExistInConf(Configuration configur && readColNames.contains(HoodieRecord.PARTITION_PATH_METADATA_FIELD); } else { return readColNames.contains(hoodieVirtualKeyInfo.get().getRecordKeyField()) - && (hoodieVirtualKeyInfo.get().getPartitionPathField().isPresent() ? readColNames.contains(hoodieVirtualKeyInfo.get().getPartitionPathField().get()) - : true); + && (!hoodieVirtualKeyInfo.get().getPartitionPathField().isPresent() || readColNames.contains(hoodieVirtualKeyInfo.get().getPartitionPathField().get())); } } diff --git a/hudi-io/src/main/java/org/apache/hudi/common/util/ComparableVersion.java b/hudi-io/src/main/java/org/apache/hudi/common/util/ComparableVersion.java index d9c7c6e6f200..68fe013a1432 100644 --- a/hudi-io/src/main/java/org/apache/hudi/common/util/ComparableVersion.java +++ b/hudi-io/src/main/java/org/apache/hudi/common/util/ComparableVersion.java @@ -154,7 +154,7 @@ private static class StringItem */ private static final String RELEASE_VERSION_INDEX = String.valueOf(QUALIFIER_LIST.indexOf("")); - private String value; + private final String value; public StringItem(String value, boolean followedByDigit) { if (followedByDigit && value.length() == 1) { diff --git a/hudi-io/src/main/java/org/apache/hudi/common/util/StringUtils.java b/hudi-io/src/main/java/org/apache/hudi/common/util/StringUtils.java index 2b9f1af11b43..58b4adefd33c 100644 --- a/hudi-io/src/main/java/org/apache/hudi/common/util/StringUtils.java +++ b/hudi-io/src/main/java/org/apache/hudi/common/util/StringUtils.java @@ -299,7 +299,7 @@ public static String replace(String text, String searchString, String replacemen increase *= (max < 0 ? 16 : (max > 64 ? 
64 : max)); StringBuilder buf = new StringBuilder(text.length() + increase); while (end != INDEX_NOT_FOUND) { - buf.append(text.substring(start, end)).append(replacement); + buf.append(text, start, end).append(replacement); start = end + replLength; if (--max == 0) { break; diff --git a/hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java b/hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java index 72a0ecec78bc..284988cc4cdd 100644 --- a/hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java +++ b/hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java @@ -108,7 +108,7 @@ public int getId() { INDEX_V1("IDXBLK)+", BlockCategory.INDEX); public enum BlockCategory { - DATA, META, INDEX, BLOOM, ALL_CATEGORIES, UNKNOWN; + DATA, META, INDEX, BLOOM, ALL_CATEGORIES, UNKNOWN } private final byte[] magic; diff --git a/hudi-io/src/main/java/org/apache/hudi/storage/StorageSchemes.java b/hudi-io/src/main/java/org/apache/hudi/storage/StorageSchemes.java index 00e8594a0c83..dc61280e1686 100644 --- a/hudi-io/src/main/java/org/apache/hudi/storage/StorageSchemes.java +++ b/hudi-io/src/main/java/org/apache/hudi/storage/StorageSchemes.java @@ -84,11 +84,11 @@ public enum StorageSchemes { // Hopsworks File System HOPSFS("hopsfs", false, true); - private String scheme; + private final String scheme; // null for uncertain if write is transactional, please update this for each FS - private Boolean isWriteTransactional; + private final Boolean isWriteTransactional; // null for uncertain if dfs support atomic create&delete, please update this for each FS - private Boolean supportAtomicCreation; + private final Boolean supportAtomicCreation; StorageSchemes(String scheme, Boolean isWriteTransactional, Boolean supportAtomicCreation) { this.scheme = scheme; diff --git a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/kafka/KafkaConnectControlAgent.java b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/kafka/KafkaConnectControlAgent.java index a40a6f73a2c1..4c3582277242 100644 --- a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/kafka/KafkaConnectControlAgent.java +++ b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/kafka/KafkaConnectControlAgent.java @@ -125,7 +125,7 @@ private void start() { Properties props = new Properties(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); // Todo fetch the worker id or name instead of a uuid. 
- props.put(ConsumerConfig.GROUP_ID_CONFIG, "hudi-control-group" + UUID.randomUUID().toString()); + props.put(ConsumerConfig.GROUP_ID_CONFIG, "hudi-control-group" + UUID.randomUUID()); props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class); diff --git a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java index be7d866df92d..8f904f8d87b6 100644 --- a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java +++ b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java @@ -55,7 +55,7 @@ import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -123,7 +123,7 @@ public static int getLatestNumPartitions(String bootstrapServers, String topicNa props.put("bootstrap.servers", bootstrapServers); try { AdminClient client = AdminClient.create(props); - DescribeTopicsResult result = client.describeTopics(Arrays.asList(topicName)); + DescribeTopicsResult result = client.describeTopics(Collections.singletonList(topicName)); Map> values = result.values(); KafkaFuture topicDescription = values.get(topicName); int numPartitions = topicDescription.get().partitions().size(); diff --git a/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/common/table/view/HoodieMetaserverFileSystemView.java b/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/common/table/view/HoodieMetaserverFileSystemView.java index 2ec9dbb9a912..2cef699cb319 100644 --- a/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/common/table/view/HoodieMetaserverFileSystemView.java +++ b/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/common/table/view/HoodieMetaserverFileSystemView.java @@ -29,10 +29,10 @@ * is specifically for hoodie table whose metadata is stored in the hoodie metaserver. 
*/ public class HoodieMetaserverFileSystemView extends HoodieTableFileSystemView { - private String databaseName; - private String tableName; + private final String databaseName; + private final String tableName; - private HoodieMetaserverClient metaserverClient; + private final HoodieMetaserverClient metaserverClient; public HoodieMetaserverFileSystemView(HoodieTableMetaClient metaClient, HoodieTimeline visibleActiveTimeline, HoodieMetaserverConfig config) { diff --git a/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/metaserver/client/HoodieMetaserverClientImp.java b/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/metaserver/client/HoodieMetaserverClientImp.java index 2c8be6c30aab..409c3aeece40 100644 --- a/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/metaserver/client/HoodieMetaserverClientImp.java +++ b/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/metaserver/client/HoodieMetaserverClientImp.java @@ -55,9 +55,9 @@ public class HoodieMetaserverClientImp implements HoodieMetaserverClient { private final long retryDelayMs; private boolean isConnected; private boolean isLocal; - private ThriftHoodieMetaserver.Iface client; + private final ThriftHoodieMetaserver.Iface client; private TTransport transport; - private DefaultInstantGenerator instantGenerator = new DefaultInstantGenerator(); + private final DefaultInstantGenerator instantGenerator = new DefaultInstantGenerator(); public HoodieMetaserverClientImp(HoodieMetaserverConfig config) { this.config = config; diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/DataSourceUtils.java b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/DataSourceUtils.java index c8666711189a..4cf4a4c2f162 100644 --- a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/DataSourceUtils.java +++ b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/DataSourceUtils.java @@ -229,7 +229,7 @@ public static HoodieWriteResult doWriteOperation(SparkRDDWriteClient client, Jav case INSERT_OVERWRITE_TABLE: return client.insertOverwriteTable(hoodieRecords, instantTime); default: - throw new HoodieException("Not a valid operation type for doWriteOperation: " + operation.toString()); + throw new HoodieException("Not a valid operation type for doWriteOperation: " + operation); } } diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/BaseWriterCommitMessage.java b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/BaseWriterCommitMessage.java index 3ba0474a34cb..d8e0e5372e70 100644 --- a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/BaseWriterCommitMessage.java +++ b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/BaseWriterCommitMessage.java @@ -29,7 +29,7 @@ */ public class BaseWriterCommitMessage implements Serializable { - private List writeStatuses; + private final List writeStatuses; public BaseWriterCommitMessage(List writeStatuses) { this.writeStatuses = writeStatuses; diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/DataSourceInternalWriterHelper.java b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/DataSourceInternalWriterHelper.java index d9db9cd51d19..e79203b918a3 100644 --- 
a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/DataSourceInternalWriterHelper.java +++ b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/DataSourceInternalWriterHelper.java @@ -55,7 +55,7 @@ public class DataSourceInternalWriterHelper { private final SparkRDDWriteClient writeClient; private final HoodieTable hoodieTable; private final WriteOperationType operationType; - private Map extraMetadata; + private final Map extraMetadata; public DataSourceInternalWriterHelper(String instantTime, HoodieWriteConfig writeConfig, StructType structType, SparkSession sparkSession, StorageConfiguration storageConf, Map extraMetadata) { diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/sql/InsertMode.java b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/sql/InsertMode.java index c68bd60ba634..7b7c08106bca 100644 --- a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/sql/InsertMode.java +++ b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/sql/InsertMode.java @@ -40,7 +40,7 @@ public enum InsertMode { */ NON_STRICT("non-strict"); - private String value; + private final String value; InsertMode(String value) { this.value = value; diff --git a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/QuickstartUtils.java b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/QuickstartUtils.java index f7eb35090e0c..404e3c736107 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/QuickstartUtils.java +++ b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/QuickstartUtils.java @@ -65,7 +65,7 @@ public static class DataGenerator { + "{\"name\":\"fare\",\"type\": \"double\"}]}"; static Schema avroSchema = new Schema.Parser().parse(TRIP_EXAMPLE_SCHEMA); - private static Random rand = new Random(46474747); + private static final Random RAND = new Random(46474747); private final Map existingKeys; private final String[] partitionPaths; @@ -90,7 +90,7 @@ private static String generateRandomString() { int stringLength = 3; StringBuilder buffer = new StringBuilder(stringLength); for (int i = 0; i < stringLength; i++) { - int randomLimitedInt = leftLimit + (int) (rand.nextFloat() * (rightLimit - leftLimit + 1)); + int randomLimitedInt = leftLimit + (int) (RAND.nextFloat() * (rightLimit - leftLimit + 1)); buffer.append((char) randomLimitedInt); } return buffer.toString(); @@ -107,11 +107,11 @@ public static GenericRecord generateGenericRecord(String rowKey, String riderNam rec.put("ts", timestamp); rec.put("rider", riderName); rec.put("driver", driverName); - rec.put("begin_lat", rand.nextDouble()); - rec.put("begin_lon", rand.nextDouble()); - rec.put("end_lat", rand.nextDouble()); - rec.put("end_lon", rand.nextDouble()); - rec.put("fare", rand.nextDouble() * 100); + rec.put("begin_lat", RAND.nextDouble()); + rec.put("begin_lon", RAND.nextDouble()); + rec.put("end_lat", RAND.nextDouble()); + rec.put("end_lon", RAND.nextDouble()); + rec.put("fare", RAND.nextDouble() * 100); return rec; } @@ -146,7 +146,7 @@ public Stream generateInsertsStream(String randomString, Integer n int currSize = getNumExistingKeys(); return IntStream.range(0, n).boxed().map(i -> { - String partitionPath = partitionPaths[rand.nextInt(partitionPaths.length)]; + String partitionPath = partitionPaths[RAND.nextInt(partitionPaths.length)]; HoodieKey key = new HoodieKey(UUID.randomUUID().toString(), partitionPath); existingKeys.put(currSize 
+ i, key); numExistingKeys++; @@ -184,7 +184,7 @@ public List generateUpdates(Integer n) { String randomString = generateRandomString(); return IntStream.range(0, n).boxed().map(x -> { try { - return generateUpdateRecord(existingKeys.get(rand.nextInt(numExistingKeys)), randomString); + return generateUpdateRecord(existingKeys.get(RAND.nextInt(numExistingKeys)), randomString); } catch (IOException e) { throw new HoodieIOException(e.getMessage(), e); } @@ -249,13 +249,12 @@ private static Option convertToString(HoodieRecord record) { } private static Option convertToString(String uuid, String partitionPath, Long ts) { - StringBuffer stringBuffer = new StringBuffer(); - stringBuffer.append("{"); - stringBuffer.append("\"ts\": \"" + (ts == null ? "0.0" : ts) + "\","); - stringBuffer.append("\"uuid\": \"" + uuid + "\","); - stringBuffer.append("\"partitionpath\": \"" + partitionPath + "\""); - stringBuffer.append("}"); - return Option.of(stringBuffer.toString()); + String stringBuffer = "{" + + "\"ts\": \"" + (ts == null ? "0.0" : ts) + "\"," + + "\"uuid\": \"" + uuid + "\"," + + "\"partitionpath\": \"" + partitionPath + "\"" + + "}"; + return Option.of(stringBuffer); } public static List convertToStringList(List records) { diff --git a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java index 0c9c12298cdc..52b452d8a4be 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java +++ b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java @@ -69,8 +69,6 @@ import java.util.ArrayList; import java.util.List; -import scala.Tuple2; - import static org.apache.hudi.common.util.StringUtils.fromUTF8Bytes; /** @@ -194,7 +192,7 @@ public JavaRDD> buildHoodieRecordsForImport(Ja job.getConfiguration()) // To reduce large number of tasks. .coalesce(16 * this.parallelism).map(entry -> { - GenericRecord genericRecord = ((Tuple2) entry)._2(); + GenericRecord genericRecord = ((scala.Tuple2) entry)._2(); Object partitionField = genericRecord.get(this.partitionKey); if (partitionField == null) { throw new HoodieIOException("partition key is missing. 
:" + this.partitionKey); diff --git a/hudi-spark-datasource/hudi-spark3-common/src/main/java/org/apache/spark/sql/execution/datasources/parquet/Spark3HoodieVectorizedParquetRecordReader.java b/hudi-spark-datasource/hudi-spark3-common/src/main/java/org/apache/spark/sql/execution/datasources/parquet/Spark3HoodieVectorizedParquetRecordReader.java index f1d10d288558..554f137c0c1b 100644 --- a/hudi-spark-datasource/hudi-spark3-common/src/main/java/org/apache/spark/sql/execution/datasources/parquet/Spark3HoodieVectorizedParquetRecordReader.java +++ b/hudi-spark-datasource/hudi-spark3-common/src/main/java/org/apache/spark/sql/execution/datasources/parquet/Spark3HoodieVectorizedParquetRecordReader.java @@ -17,10 +17,11 @@ package org.apache.spark.sql.execution.datasources.parquet; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hudi.client.utils.SparkInternalSchemaConverter; import org.apache.hudi.common.util.collection.Pair; + +import org.apache.hadoop.mapreduce.InputSplit; +import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.spark.memory.MemoryMode; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector; @@ -39,7 +40,7 @@ public class Spark3HoodieVectorizedParquetRecordReader extends VectorizedParquetRecordReader { // save the col type change info. - private Map> typeChangeInfos; + private final Map> typeChangeInfos; private ColumnarBatch columnarBatch; @@ -48,7 +49,7 @@ public class Spark3HoodieVectorizedParquetRecordReader extends VectorizedParquet private ColumnVector[] columnVectors; // The capacity of vectorized batch. - private int capacity; + private final int capacity; // If true, this class returns batches instead of rows. 
private boolean returnColumnarBatch; diff --git a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/ColumnNameXLator.java b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/ColumnNameXLator.java index 7d4eda29dc44..e2f60ec97b0c 100644 --- a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/ColumnNameXLator.java +++ b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/ColumnNameXLator.java @@ -24,11 +24,11 @@ public class ColumnNameXLator { - private static Map xformMap = new HashMap<>(); + private static final Map X_FORM_MAP = new HashMap<>(); public static String translateNestedColumn(String colName) { Map.Entry entry; - for (Iterator> ic = xformMap.entrySet().iterator(); ic.hasNext(); colName = + for (Iterator> ic = X_FORM_MAP.entrySet().iterator(); ic.hasNext(); colName = colName.replaceAll(entry.getKey(), entry.getValue())) { entry = ic.next(); } @@ -45,6 +45,6 @@ public static String translate(String colName, boolean nestedColumn) { } static { - xformMap.put("\\$", "_dollar_"); + X_FORM_MAP.put("\\$", "_dollar_"); } } diff --git a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/HiveSchemaUtil.java b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/HiveSchemaUtil.java index baf905094d40..76297759892e 100644 --- a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/HiveSchemaUtil.java +++ b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/HiveSchemaUtil.java @@ -446,8 +446,8 @@ public static String generateCreateDDL(String tableName, MessageType storageSche List partitionFields = new ArrayList<>(); for (String partitionKey : config.getSplitStrings(META_SYNC_PARTITION_FIELDS)) { String partitionKeyWithTicks = tickSurround(partitionKey); - partitionFields.add(new StringBuilder().append(partitionKeyWithTicks).append(" ") - .append(getPartitionKeyType(hiveSchema, partitionKeyWithTicks)).toString()); + partitionFields.add(partitionKeyWithTicks + " " + + getPartitionKeyType(hiveSchema, partitionKeyWithTicks)); } String partitionsStr = String.join(",", partitionFields); diff --git a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/hive/SchemaDifference.java b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/hive/SchemaDifference.java index f48208a439fd..7a77cab2df1d 100644 --- a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/hive/SchemaDifference.java +++ b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/hive/SchemaDifference.java @@ -83,9 +83,9 @@ public static class Builder { private final MessageType storageSchema; private final Map tableSchema; - private List deleteColumns; - private Map updateColumnTypes; - private Map addColumnTypes; + private final List deleteColumns; + private final Map updateColumnTypes; + private final Map addColumnTypes; public Builder(MessageType storageSchema, Map tableSchema) { this.storageSchema = storageSchema; diff --git a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncConfig.java b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncConfig.java index 475a43a0bab0..bac20f851d8c 100644 --- a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncConfig.java +++ b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncConfig.java @@ -201,7 +201,7 @@ public class HoodieSyncConfig extends HoodieConfig { + "obtained from Hudi's internal metadata table. 
Note, " + HoodieMetadataConfig.ENABLE + " must be set to true."); private Configuration hadoopConf; - private HoodieMetricsConfig metricsConfig; + private final HoodieMetricsConfig metricsConfig; public HoodieSyncConfig(Properties props) { this(props, HadoopConfigUtils.createHadoopConf(props)); diff --git a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncException.java b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncException.java index d7238fbe8bf9..109296d6b33a 100644 --- a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncException.java +++ b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncException.java @@ -37,6 +37,6 @@ public HoodieSyncException(Throwable t) { } protected static String format(String message, Object... args) { - return String.format(String.valueOf(message), (Object[]) args); + return String.format(String.valueOf(message), args); } } diff --git a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/metrics/HoodieMetaSyncMetrics.java b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/metrics/HoodieMetaSyncMetrics.java index cbe729ae0301..d810c396c743 100644 --- a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/metrics/HoodieMetaSyncMetrics.java +++ b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/metrics/HoodieMetaSyncMetrics.java @@ -43,7 +43,7 @@ public class HoodieMetaSyncMetrics { private static final String RECREATE_TABLE_DURATION_MS_METRIC = "recreate_table_duration_ms"; // Metrics are shut down by the shutdown hook added in the Metrics class private Metrics metrics; - private HoodieMetricsConfig metricsConfig; + private final HoodieMetricsConfig metricsConfig; private transient HoodieStorage storage; private final String syncToolName; diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/AsyncTimelineServerBasedDetectionStrategy.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/AsyncTimelineServerBasedDetectionStrategy.java index d73d787a5dc0..bc710af3a2d2 100644 --- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/AsyncTimelineServerBasedDetectionStrategy.java +++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/AsyncTimelineServerBasedDetectionStrategy.java @@ -43,7 +43,7 @@ public class AsyncTimelineServerBasedDetectionStrategy extends TimelineServerBas private static final Logger LOG = LoggerFactory.getLogger(AsyncTimelineServerBasedDetectionStrategy.class); - private AtomicBoolean hasConflict = new AtomicBoolean(false); + private final AtomicBoolean hasConflict = new AtomicBoolean(false); private ScheduledExecutorService asyncDetectorExecutor; public AsyncTimelineServerBasedDetectionStrategy(String basePath, String markerDir, String markerName, Boolean checkCommitConflict) { diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerBasedEarlyConflictDetectionRunnable.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerBasedEarlyConflictDetectionRunnable.java index bce28e8ae9cd..102203025f35 100644 --- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerBasedEarlyConflictDetectionRunnable.java +++ 
b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerBasedEarlyConflictDetectionRunnable.java @@ -43,13 +43,13 @@ public class MarkerBasedEarlyConflictDetectionRunnable implements Runnable { private static final Logger LOG = LoggerFactory.getLogger(MarkerBasedEarlyConflictDetectionRunnable.class); - private MarkerHandler markerHandler; - private String markerDir; - private String basePath; - private HoodieStorage storage; - private AtomicBoolean hasConflict; - private long maxAllowableHeartbeatIntervalInMs; - private Set completedCommits; + private final MarkerHandler markerHandler; + private final String markerDir; + private final String basePath; + private final HoodieStorage storage; + private final AtomicBoolean hasConflict; + private final long maxAllowableHeartbeatIntervalInMs; + private final Set completedCommits; private final boolean checkCommitConflict; public MarkerBasedEarlyConflictDetectionRunnable(AtomicBoolean hasConflict, MarkerHandler markerHandler, diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java index 4204a06876f6..cd4ec27f10b6 100644 --- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java +++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java @@ -378,6 +378,6 @@ private void flushMarkersToFile(int markerFileIndex) { closeQuietly(bufferedWriter); closeQuietly(outputStream); } - LOG.debug(markersFilePath.toString() + " written in " + timer.endTimer() + " ms"); + LOG.debug(markersFilePath + " written in " + timer.endTimer() + " ms"); } } \ No newline at end of file diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java index 05b902087a66..73569dc67284 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java @@ -62,8 +62,6 @@ import java.util.Collections; import java.util.List; -import scala.Tuple2; - /** * Loads data from Parquet Sources. * @@ -179,7 +177,7 @@ protected JavaRDD> buildHoodieRecordsForImport job.getConfiguration()) // To reduce large number of tasks. .coalesce(16 * cfg.parallelism).map(entry -> { - GenericRecord genericRecord = ((Tuple2) entry)._2(); + GenericRecord genericRecord = ((scala.Tuple2) entry)._2(); Object partitionField = genericRecord.get(cfg.partitionKey); if (partitionField == null) { throw new HoodieIOException("partition key is missing. :" + cfg.partitionKey); diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java index b09c056c6fe5..8ccb745a6204 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java @@ -52,7 +52,7 @@ public class HoodieCleaner { /** * Bag of properties with source, hoodie client, key generator etc. 
*/ - private TypedProperties props; + private final TypedProperties props; public HoodieCleaner(Config cfg, JavaSparkContext jssc) { this(cfg, jssc, UtilHelpers.buildProperties(jssc.hadoopConfiguration(), cfg.propsFilePath, cfg.configs)); diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java index 4cf59d58e2e1..fbe3d5c702bb 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java @@ -55,7 +55,7 @@ public class HoodieCompactor { public static final String SCHEDULE_AND_EXECUTE = "scheduleandexecute"; private final Config cfg; private transient FileSystem fs; - private TypedProperties props; + private final TypedProperties props; private final JavaSparkContext jsc; private HoodieTableMetaClient metaClient; diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableValidator.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableValidator.java index 5d7f9dae90a0..f8d0d4c49ab4 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableValidator.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableValidator.java @@ -110,7 +110,7 @@ public class HoodieDataTableValidator implements Serializable { // Properties with source, hoodie client, key generator etc. private TypedProperties props; - private HoodieTableMetaClient metaClient; + private final HoodieTableMetaClient metaClient; protected transient Option asyncDataTableValidateService; @@ -351,7 +351,7 @@ public void doDataTableValidation() { if (!danglingFiles.isEmpty()) { LOG.error("Data table validation failed due to extra files found for completed commits " + danglingFiles.size()); - danglingFiles.forEach(entry -> LOG.error("Dangling file: " + entry.toString())); + danglingFiles.forEach(entry -> LOG.error("Dangling file: " + entry)); finalResult = false; if (!cfg.ignoreFailed) { throw new HoodieValidationException("Data table validation failed due to dangling files " + danglingFiles.size()); diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDropPartitionsTool.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDropPartitionsTool.java index f82c8149e364..e81d15ff6798 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDropPartitionsTool.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDropPartitionsTool.java @@ -114,7 +114,7 @@ public class HoodieDropPartitionsTool implements Serializable { // config private final Config cfg; // Properties with source, hoodie client, key generator etc. - private TypedProperties props; + private final TypedProperties props; private final HoodieTableMetaClient metaClient; @@ -286,7 +286,7 @@ public static void main(String[] args) { try { tool.run(); } catch (Throwable throwable) { - LOG.error("Fail to run deleting table partitions for " + cfg.toString(), throwable); + LOG.error("Fail to run deleting table partitions for " + cfg, throwable); } finally { jsc.stop(); } @@ -384,7 +384,7 @@ private void syncHive(HiveSyncConfig hiveSyncConfig) { + "). 
). Hive metastore URL :" + hiveSyncConfig.getStringOrDefault(HiveSyncConfigHolder.HIVE_URL) + ", basePath :" + cfg.basePath); - LOG.info("Hive Sync Conf => " + hiveSyncConfig.toString()); + LOG.info("Hive Sync Conf => " + hiveSyncConfig); FileSystem fs = HadoopFSUtils.getFs(cfg.basePath, jsc.hadoopConfiguration()); HiveConf hiveConf = new HiveConf(); if (!StringUtils.isNullOrEmpty(cfg.hiveHMSUris)) { diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieIndexer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieIndexer.java index 7071e57c9309..90d958d540cd 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieIndexer.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieIndexer.java @@ -93,7 +93,7 @@ public class HoodieIndexer { static final String DROP_INDEX = "dropindex"; private final HoodieIndexer.Config cfg; - private TypedProperties props; + private final TypedProperties props; private final JavaSparkContext jsc; private final HoodieTableMetaClient metaClient; diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java index 8a8bf31a8ee9..771cba994f49 100644 --- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java +++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java @@ -202,9 +202,9 @@ public class HoodieMetadataTableValidator implements Serializable { // Spark context private transient JavaSparkContext jsc; // config - private Config cfg; + private final Config cfg; // Properties with source, hoodie client, key generator etc. - private TypedProperties props; + private final TypedProperties props; private final HoodieTableMetaClient metaClient; @@ -212,7 +212,7 @@ public class HoodieMetadataTableValidator implements Serializable { private final String taskLabels; - private List throwables = new ArrayList<>(); + private final List throwables = new ArrayList<>(); public HoodieMetadataTableValidator(JavaSparkContext jsc, Config cfg) { this.jsc = jsc; @@ -1444,11 +1444,8 @@ private boolean assertBaseFilesEquality(FileSlice fileSlice1, FileSlice fileSlic return baseFile1.getFileName().equals(baseFile2.getFileName()) && baseFile1.getFileId().equals(baseFile2.getFileId()) && baseFile1.getFileSize() == baseFile2.getFileSize(); } else { - if (!fileSlice1.getBaseFile().isPresent() == fileSlice2.getBaseFile().isPresent()) { - return false; - } + return fileSlice1.getBaseFile().isPresent() == fileSlice2.getBaseFile().isPresent(); } - return true; } /** @@ -1770,7 +1767,7 @@ public List> getSortedColumnStatsList(Stri .collect(Collectors.toList()); } else { FileFormatUtils formatUtils = HoodieIOFactory.getIOFactory(metaClient.getStorage()).getFileFormatUtils(HoodieFileFormat.PARQUET); - return fileNames.stream().flatMap(filename -> { + return (List>) fileNames.stream().flatMap(filename -> { if (filename.endsWith(HoodieFileFormat.PARQUET.getFileExtension())) { return formatUtils.readColumnStatsFromMetadata( metaClient.getStorage(), @@ -1781,14 +1778,12 @@ public List> getSortedColumnStatsList(Stri StoragePath storagePartitionPath = new StoragePath(metaClient.getBasePath(), partitionPath); String filePath = new StoragePath(storagePartitionPath, filename).toString(); try { - return ((List>) getLogFileColumnRangeMetadata(filePath, metaClient, allColumnNameList, Option.of(readerSchema), + return
getLogFileColumnRangeMetadata(filePath, metaClient, allColumnNameList, Option.of(readerSchema), metadataConfig.getMaxReaderBufferSize())
             .stream()
             // We need to convert file path and use only the file name instead of the complete file path
             .map(m -> (HoodieColumnRangeMetadata<Comparable>) HoodieColumnRangeMetadata.create(filename, m.getColumnName(), m.getMinValue(), m.getMaxValue(),
-                m.getNullCount(), m.getValueCount(), m.getTotalSize(), m.getTotalUncompressedSize()))
-            .collect(Collectors.toList()))
-            .stream();
+                m.getNullCount(), m.getValueCount(), m.getTotalSize(), m.getTotalUncompressedSize()));
     } catch (IOException e) {
       throw new HoodieIOException(String.format("Failed to get column stats for file: %s", filePath), e);
     }
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieRepairTool.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieRepairTool.java
index 5bc5867be6e7..982434a1abf6 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieRepairTool.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieRepairTool.java
@@ -146,7 +146,7 @@ public class HoodieRepairTool {
   // Repair config
   private final Config cfg;
   // Properties with source, hoodie client, key generator etc.
-  private TypedProperties props;
+  private final TypedProperties props;
   // Spark context
   private final HoodieEngineContext context;
   private final HoodieTableMetaClient metaClient;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieTTLJob.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieTTLJob.java
index 7ab5b5747c6f..fb608c9cb668 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieTTLJob.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieTTLJob.java
@@ -47,7 +47,7 @@ public class HoodieTTLJob {
   private final Config cfg;
   private final TypedProperties props;
   private final JavaSparkContext jsc;
-  private HoodieTableMetaClient metaClient;
+  private final HoodieTableMetaClient metaClient;
 
   public HoodieTTLJob(JavaSparkContext jsc, Config cfg) {
     this(jsc, cfg, UtilHelpers.buildProperties(jsc.hadoopConfiguration(), cfg.propsFilePath, cfg.configs),
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java
index 0df69a685e0f..90267c8c0b8d 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java
@@ -111,9 +111,9 @@ public class TableSizeStats implements Serializable {
   // Spark context
   private transient JavaSparkContext jsc;
   // config
-  private Config cfg;
+  private final Config cfg;
   // Properties with source, hoodie client, key generator etc.
-  private TypedProperties props;
+  private final TypedProperties props;
 
   public TableSizeStats(JavaSparkContext jsc, Config cfg) {
     this.jsc = jsc;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/callback/kafka/HoodieWriteCommitKafkaCallback.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/callback/kafka/HoodieWriteCommitKafkaCallback.java
index 75cc9df86d3a..78bd0ae0228f 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/callback/kafka/HoodieWriteCommitKafkaCallback.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/callback/kafka/HoodieWriteCommitKafkaCallback.java
@@ -49,9 +49,9 @@ public class HoodieWriteCommitKafkaCallback implements HoodieWriteCommitCallback
 
   private static final Logger LOG = LoggerFactory.getLogger(HoodieWriteCommitKafkaCallback.class);
 
-  private HoodieConfig hoodieConfig;
-  private String bootstrapServers;
-  private String topic;
+  private final HoodieConfig hoodieConfig;
+  private final String bootstrapServers;
+  private final String topic;
 
   public HoodieWriteCommitKafkaCallback(HoodieWriteConfig config) {
     this.hoodieConfig = config;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/checkpointing/KafkaConnectHdfsProvider.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/checkpointing/KafkaConnectHdfsProvider.java
index 8e8af55a3c56..273bbb12f228 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/checkpointing/KafkaConnectHdfsProvider.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/checkpointing/KafkaConnectHdfsProvider.java
@@ -38,7 +38,7 @@
  * Documentation: https://docs.confluent.io/current/connect/kafka-connect-hdfs/index.html
  */
 public class KafkaConnectHdfsProvider extends InitialCheckPointProvider {
-  private static String FILENAME_SEPARATOR = "[\\+\\.]";
+  private static final String FILENAME_SEPARATOR = "[\\+\\.]";
 
   public KafkaConnectHdfsProvider(TypedProperties props) {
     super(props);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/JdbcbasedSchemaProvider.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/JdbcbasedSchemaProvider.java
index ef76a797ca50..9cbc57539e53 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/JdbcbasedSchemaProvider.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/JdbcbasedSchemaProvider.java
@@ -41,7 +41,7 @@
  */
 public class JdbcbasedSchemaProvider extends SchemaProvider {
   private Schema sourceSchema;
-  private Map<String, String> options = new HashMap<>();
+  private final Map<String, String> options = new HashMap<>();
 
   public JdbcbasedSchemaProvider(TypedProperties props, JavaSparkContext jssc) {
     super(props, jssc);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/debezium/DebeziumSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/debezium/DebeziumSource.java
index e4dc95e26ecc..acb6e925681a 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/debezium/DebeziumSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/debezium/DebeziumSource.java
@@ -192,7 +192,7 @@ public static Dataset convertDateColumns(Dataset dataset, Schema schem
       }
     }).map(Field::name).collect(Collectors.toList());
 
-    LOG.info("Date fields: " + dateFields.toString());
+    LOG.info("Date fields: {}", dateFields);
 
     for (String dateCol : dateFields) {
       dataset = dataset.withColumn(dateCol, functions.col(dateCol).cast(DataTypes.DateType));
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DatePartitionPathSelector.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DatePartitionPathSelector.java
index 7458e3a59e00..432e2994b799 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DatePartitionPathSelector.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DatePartitionPathSelector.java
@@ -71,7 +71,7 @@
  */
 public class DatePartitionPathSelector extends DFSPathSelector {
 
-  private static volatile Logger LOG = LoggerFactory.getLogger(DatePartitionPathSelector.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DatePartitionPathSelector.class);
 
   private final String dateFormat;
   private final int datePartitionDepth;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java
index e09e1c7be812..d9b940097dbd 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java
@@ -104,12 +104,10 @@ public static Map strToOffsets(String checkpointStr) {
   public static String offsetsToStr(OffsetRange[] ranges) {
     // merge the ranges by partition to maintain one offset range map to one topic partition.
     ranges = mergeRangesByTopicPartition(ranges);
-    StringBuilder sb = new StringBuilder();
     // at least 1 partition will be present.
-    sb.append(ranges[0].topic() + ",");
-    sb.append(Arrays.stream(ranges).map(r -> String.format("%s:%d", r.partition(), r.untilOffset()))
-        .collect(Collectors.joining(",")));
-    return sb.toString();
+    return ranges[0].topic() + ","
+        + Arrays.stream(ranges).map(r -> String.format("%s:%d", r.partition(), r.untilOffset()))
+        .collect(Collectors.joining(","));
   }
 
   /**
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/gcs/MessageValidity.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/gcs/MessageValidity.java
index 27aa90661981..a1c73d95800e 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/gcs/MessageValidity.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/gcs/MessageValidity.java
@@ -50,6 +50,6 @@ public Option getDescription() {
    * */
   public enum ProcessingDecision {
     DO_PROCESS,
-    DO_SKIP;
+    DO_SKIP
   }
 }
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java
index cc2615455547..a2aaeeb89e24 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java
@@ -54,9 +54,9 @@
 import java.io.Serializable;
 import java.util.HashMap;
 
-import static org.apache.hudi.common.table.HoodieTableConfig.TIMELINE_HISTORY_PATH;
 import static org.apache.hudi.common.table.HoodieTableConfig.PARTITION_METAFILE_USE_BASE_FORMAT;
 import static org.apache.hudi.common.table.HoodieTableConfig.POPULATE_META_FIELDS;
+import static org.apache.hudi.common.table.HoodieTableConfig.TIMELINE_HISTORY_PATH;
 import static org.apache.hudi.common.table.HoodieTableConfig.TIMELINE_TIMEZONE;
 import static org.apache.hudi.config.HoodieWriteConfig.PRECOMBINE_FIELD_NAME;
 import static org.apache.hudi.config.HoodieWriteConfig.WRITE_TABLE_VERSION;
@@ -110,7 +110,7 @@ public class BootstrapExecutor implements Serializable {
    */
   private transient FileSystem fs;
 
-  private String bootstrapBasePath;
+  private final String bootstrapBasePath;
 
   /**
    * Bootstrap Executor.
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java
index 92c4a843c94a..0b5ed0ff26c4 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java
@@ -70,12 +70,12 @@
  */
 public class HoodieMultiTableStreamer {
 
-  private static Logger logger = LoggerFactory.getLogger(HoodieMultiTableStreamer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HoodieMultiTableStreamer.class);
 
-  private List<TableExecutionContext> tableExecutionContexts;
+  private final List<TableExecutionContext> tableExecutionContexts;
   private transient JavaSparkContext jssc;
-  private Set<String> successTables;
-  private Set<String> failedTables;
+  private final Set<String> successTables;
+  private final Set<String> failedTables;
 
   public HoodieMultiTableStreamer(Config config, JavaSparkContext jssc) throws IOException {
     this.tableExecutionContexts = new ArrayList<>();
@@ -119,7 +119,7 @@ private void checkIfTableConfigFileExists(String configFolder, FileSystem fs, St
   //commonProps are passed as parameter which contain table to config file mapping
   private void populateTableExecutionContextList(TypedProperties properties, String configFolder, FileSystem fs, Config config) throws IOException {
     List<String> tablesToBeIngested = getTablesToBeIngested(properties);
-    logger.info("tables to be ingested via MultiTableDeltaStreamer : " + tablesToBeIngested);
+    LOG.info("tables to be ingested via MultiTableDeltaStreamer : " + tablesToBeIngested);
     TableExecutionContext executionContext;
     for (String table : tablesToBeIngested) {
       String[] tableWithDatabase = table.split("\\.");
@@ -270,11 +270,11 @@ public static void main(String[] args) throws IOException {
     }
 
     if (config.enableHiveSync) {
-      logger.warn("--enable-hive-sync will be deprecated in a future release; please use --enable-sync instead for Hive syncing");
+      LOG.warn("--enable-hive-sync will be deprecated in a future release; please use --enable-sync instead for Hive syncing");
     }
 
     if (config.targetTableName != null) {
-      logger.warn(String.format("--target-table is deprecated and will be removed in a future release due to it's useless;"
+      LOG.warn(String.format("--target-table is deprecated and will be removed in a future release due to it's useless;"
           + " please use %s to configure multiple target tables", HoodieStreamerConfig.TABLES_TO_BE_INGESTED.key()));
     }
@@ -458,14 +458,14 @@ public void sync() {
         new HoodieStreamer(context.getConfig(), jssc, Option.ofNullable(context.getProperties())).sync();
         successTables.add(Helpers.getTableWithDatabase(context));
       } catch (Exception e) {
-        logger.error("error while running MultiTableDeltaStreamer for table: " + context.getTableName(), e);
+        LOG.error("error while running MultiTableDeltaStreamer for table: " + context.getTableName(), e);
         failedTables.add(Helpers.getTableWithDatabase(context));
       }
     }
 
-    logger.info("Ingestion was successful for topics: " + successTables);
+    LOG.info("Ingestion was successful for topics: " + successTables);
     if (!failedTables.isEmpty()) {
-      logger.info("Ingestion failed for topics: " + failedTables);
+      LOG.info("Ingestion failed for topics: " + failedTables);
     }
   }
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java
index f784a7dba6db..a23687d5c7d7 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java
@@ -119,7 +119,7 @@ public class HoodieStreamer implements Serializable {
   public static final String CHECKPOINT_KEY = HoodieWriteConfig.STREAMER_CHECKPOINT_KEY;
   public static final String CHECKPOINT_RESET_KEY = STREAMER_CHECKPOINT_RESET_KEY_V1;
 
-  protected final transient Config cfg;
+  protected transient Config cfg;
 
   /**
    * NOTE: These properties are already consolidated w/ CLI provided config-overrides.
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
index 12c869666e1f..8413b1fac318 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
@@ -192,7 +192,7 @@ public class StreamSync implements Serializable, Closeable {
    */
   private transient Option<Transformer> transformer;
 
-  private String keyGenClassName;
+  private final String keyGenClassName;
 
   /**
    * Filesystem used.
@@ -202,12 +202,12 @@
   /**
    * Spark context Wrapper.
    */
-  private final transient HoodieSparkEngineContext hoodieSparkContext;
+  private transient HoodieSparkEngineContext hoodieSparkContext;
 
   /**
    * Spark Session.
    */
-  private transient SparkSession sparkSession;
+  private final transient SparkSession sparkSession;
 
   /**
    * Hive Config.
@@ -1278,7 +1278,7 @@ public Option getClusteringInstantOpt() {
 
 class WriteClientWriteResult {
   private Map<String, List<String>> partitionToReplacedFileIds = Collections.emptyMap();
-  private JavaRDD<WriteStatus> writeStatusRDD;
+  private final JavaRDD<WriteStatus> writeStatusRDD;
 
   public WriteClientWriteResult(JavaRDD<WriteStatus> writeStatusRDD) {
     this.writeStatusRDD = writeStatusRDD;