From e180b9eb75494f0d3b13dfd415b3e5d46417f050 Mon Sep 17 00:00:00 2001
From: vinoth chandar
Date: Thu, 19 Dec 2024 13:30:38 -0800
Subject: [PATCH 1/4] IDEA code cleanup changes; follow-up fixes to keep
 checkstyle happy
---
.../aws/sync/AWSGlueCatalogSyncClient.java | 3 +-
.../hudi/cli/commands/CommitsCommand.java | 4 +-
.../hudi/cli/commands/CompactionCommand.java | 4 +-
.../org/apache/hudi/cli/utils/CLIUtils.java | 6 +-
.../hudi/cli/utils/InputStreamConsumer.java | 2 +-
.../hudi/cli/utils/SparkTempViewProvider.java | 4 +-
.../hudi/async/AsyncClusteringService.java | 2 +-
.../hudi/async/AsyncCompactService.java | 2 +-
.../apache/hudi/async/HoodieAsyncService.java | 6 +-
.../util/HoodieWriteCommitCallbackUtil.java | 4 +-
.../org/apache/hudi/client/WriteStatus.java | 16 +--
.../lock/FileSystemBasedLockProvider.java | 4 +-
.../client/transaction/lock/LockManager.java | 2 +-
.../apache/hudi/config/HoodieIndexConfig.java | 2 +-
.../apache/hudi/config/HoodieWriteConfig.java | 4 +-
.../hudi/index/bloom/BloomIndexFileInfo.java | 10 +-
.../DefaultHBaseQPSResourceAllocator.java | 2 +-
.../apache/hudi/metrics/HoodieMetrics.java | 4 +-
.../apache/hudi/table/WorkloadProfile.java | 12 +-
.../org/apache/hudi/table/WorkloadStat.java | 12 +-
.../hudi/table/action/clean/CleanPlanner.java | 7 +-
.../strategy/ClusteringPlanStrategy.java | 17 ++-
.../hudi/table/action/commit/BucketInfo.java | 11 +-
.../table/action/commit/InsertBucket.java | 8 +-
.../hudi/table/action/commit/SmallFile.java | 9 +-
.../compact/RunCompactionActionExecutor.java | 2 +-
.../ScheduleCompactionActionExecutor.java | 2 +-
.../strategy/CompositeCompactionStrategy.java | 2 +-
.../hudi/table/marker/DirectWriteMarkers.java | 2 +-
.../TimelineServerBasedWriteMarkers.java | 8 +-
.../apache/hudi/util/HttpRequestClient.java | 2 +-
.../hudi/client/FlinkTaskContextSupplier.java | 2 +-
.../hudi/client/HoodieFlinkWriteClient.java | 2 +-
.../hudi/execution/ExplicitWriteHandler.java | 2 +-
.../row/parquet/ParquetSchemaConverter.java | 6 +-
.../FlinkInsertCommitActionExecutor.java | 2 +-
.../FlinkUpsertCommitActionExecutor.java | 2 +-
.../upgrade/FlinkUpgradeDowngradeHelper.java | 3 +-
.../hudi/client/HoodieJavaWriteClient.java | 3 +-
.../bulkinsert/JavaGlobalSortPartitioner.java | 14 +-
.../JavaInsertCommitActionExecutor.java | 2 +-
.../JavaUpsertCommitActionExecutor.java | 2 +-
.../action/commit/JavaUpsertPartitioner.java | 8 +-
.../hudi/client/HoodieSparkCompactor.java | 2 +-
.../hudi/client/SparkRDDReadClient.java | 2 +-
.../validator/SparkPreCommitValidator.java | 8 +-
.../bulkinsert/BulkInsertMapFunction.java | 14 +-
.../bulkinsert/GlobalSortPartitioner.java | 6 +-
.../RDDPartitionSortPartitioner.java | 9 +-
.../BucketizedBloomCheckPartitioner.java | 6 +-
.../index/hbase/SparkHoodieHBaseIndex.java | 4 +-
.../io/storage/HoodieSparkParquetReader.java | 5 +-
.../hudi/metrics/DistributedRegistry.java | 2 +-
.../hudi/sort/SpaceCurveSortingHelper.java | 22 ++--
.../SparkBootstrapCommitActionExecutor.java | 2 +-
...rkDeletePartitionCommitActionExecutor.java | 2 +-
.../action/commit/UpsertPartitioner.java | 6 +-
.../hudi/avro/AvroSchemaCompatibility.java | 4 +-
.../org/apache/hudi/avro/HoodieAvroUtils.java | 2 +-
.../apache/hudi/avro/processors/Parser.java | 2 +-
.../apache/hudi/common/HoodieJsonPayload.java | 2 +-
.../hudi/common/bloom/HashFunction.java | 6 +-
.../common/bootstrap/FileStatusUtils.java | 2 +-
.../common/config/HoodieStorageConfig.java | 2 +-
.../common/engine/HoodieEngineContext.java | 2 +-
.../common/fs/ConsistencyGuardConfig.java | 10 +-
.../common/fs/SizeAwareDataOutputStream.java | 4 +-
.../apache/hudi/common/model/BaseFile.java | 2 +-
.../common/model/ConsistentHashingNode.java | 8 +-
.../model/DefaultHoodieRecordPayload.java | 2 +-
.../apache/hudi/common/model/FileSlice.java | 12 +-
.../hudi/common/model/HoodieFileGroup.java | 10 +-
.../apache/hudi/common/model/HoodieKey.java | 8 +-
.../hudi/common/model/HoodieRecord.java | 10 +-
.../model/HoodieRecordGlobalLocation.java | 12 +-
.../common/model/HoodieRecordLocation.java | 10 +-
.../model/OverwriteWithLatestAvroPayload.java | 2 +-
.../model/PartialUpdateAvroPayload.java | 2 +-
.../hudi/common/model/RewriteAvroPayload.java | 2 +-
.../common/table/HoodieTableMetaClient.java | 12 +-
.../table/cdc/HoodieCDCInferenceCase.java | 2 +-
.../table/log/HoodieFileSliceReader.java | 12 +-
.../common/table/log/HoodieLogFileReader.java | 2 +-
.../log/HoodieLogFormatReverseReader.java | 2 +-
.../table/log/block/HoodieDataBlock.java | 6 +-
.../table/log/block/HoodieDeleteBlock.java | 4 +-
...diePositionBasedFileGroupRecordBuffer.java | 7 +-
.../timeline/ArchivedTimelineLoader.java | 6 +-
.../timeline/CompletionTimeQueryView.java | 20 +--
.../table/timeline/HoodieActiveTimeline.java | 122 +++++++++---------
.../timeline/HoodieArchivedTimeline.java | 26 ++--
.../timeline/HoodieInstantTimeGenerator.java | 10 +-
.../common/table/timeline/TimelineUtils.java | 6 +-
.../versioning/v1/ActiveTimelineV1.java | 4 +-
.../v1/CompletionTimeQueryViewV1.java | 2 +-
.../versioning/v2/ActiveTimelineV2.java | 2 +-
.../HoodieTablePreCommitFileSystemView.java | 10 +-
.../view/RemoteHoodieTableFileSystemView.java | 2 +-
.../hudi/common/util/AvroSchemaCache.java | 4 +-
.../apache/hudi/common/util/ConfigUtils.java | 3 +-
.../hudi/common/util/InternalSchemaCache.java | 10 +-
.../util/collection/BitCaskDiskMap.java | 24 ++--
.../hudi/common/util/collection/DiskMap.java | 2 +-
.../hudi/common/util/collection/Pair.java | 2 +-
.../common/util/collection/RocksDBDAO.java | 2 +-
.../hudi/common/util/collection/Triple.java | 4 +-
.../hudi/common/util/hash/JenkinsHash.java | 12 +-
.../hudi/common/util/hash/MurmurHash.java | 8 +-
.../util/queue/DisruptorMessageQueue.java | 2 +-
.../HoodieMetricsPrometheusConfig.java | 2 +-
.../apache/hudi/expression/Predicates.java | 6 +-
.../index/secondary/HoodieSecondaryIndex.java | 1 -
.../schema/InternalSchemaBuilder.java | 1 -
.../apache/hudi/internal/schema/Types.java | 3 +-
.../action/InternalSchemaChangeApplier.java | 4 +-
.../internal/schema/action/TableChange.java | 4 +-
.../schema/utils/InternalSchemaUtils.java | 6 +-
.../internal/schema/utils/SerDeHelper.java | 4 +-
.../metadata/HoodieTableMetadataUtil.java | 10 +-
.../hudi/metrics/MetricsGraphiteReporter.java | 4 +-
.../custom/CustomizableMetricsReporter.java | 4 +-
.../hudi/metrics/datadog/DatadogReporter.java | 2 +-
.../utils/QuickstartConfigurations.java | 14 +-
.../quickstart/utils/SchemaBuilder.java | 4 +-
.../spark/HoodieSparkBootstrapExample.java | 2 +-
.../spark/HoodieWriteClientExample.java | 6 +-
.../hudi/sink/bulk/sort/SortOperator.java | 2 +-
.../FlinkConsistentBucketUpdateStrategy.java | 4 +-
.../sink/transform/ChainedTransformer.java | 2 +-
.../hudi/source/ExpressionEvaluators.java | 4 +-
.../apache/hudi/table/HoodieTableSource.java | 12 +-
.../apache/hudi/table/format/FormatUtils.java | 3 +-
.../table/lookup/HoodieLookupFunction.java | 2 +-
.../apache/hudi/util/FlinkWriteClients.java | 2 +-
.../hudi/util/RowDataToAvroConverters.java | 4 +-
.../cow/vector/HeapMapColumnVector.java | 4 +-
.../cow/vector/HeapMapColumnVector.java | 4 +-
.../cow/vector/HeapMapColumnVector.java | 4 +-
.../config/DFSPropertiesConfiguration.java | 2 +-
.../apache/hudi/common/util/AvroOrcUtils.java | 18 +--
.../hudi/common/util/HadoopConfigUtils.java | 3 +-
.../apache/hudi/common/util/ParquetUtils.java | 2 +-
.../hadoop/fs/BoundedFsDataInputStream.java | 4 +-
.../fs/HoodieRetryWrapperFileSystem.java | 13 +-
.../fs/HoodieSerializableFileStatus.java | 22 ++--
.../hadoop/fs/HoodieWrapperFileSystem.java | 4 +-
.../hadoop/fs/inline/InMemoryFileSystem.java | 3 +-
.../hudi/io/hadoop/HoodieAvroHFileWriter.java | 4 +-
.../hudi/io/hadoop/HoodieAvroOrcWriter.java | 2 +-
.../io/hadoop/HoodieBaseParquetWriter.java | 4 +-
.../hudi/hadoop/HoodieHFileRecordReader.java | 4 +-
.../hudi/hadoop/HoodieROTablePathFilter.java | 8 +-
.../apache/hudi/hadoop/InputPathHandler.java | 2 +-
.../hadoop/avro/HoodieAvroParquetReader.java | 2 +-
.../hive/HoodieCombineHiveInputFormat.java | 11 +-
.../hive/HoodieCombineRealtimeFileSplit.java | 6 +-
.../AbstractRealtimeRecordReader.java | 2 +-
.../HoodieCombineRealtimeRecordReader.java | 4 +-
.../hudi/hadoop/utils/HoodieHiveUtils.java | 2 +-
.../hadoop/utils/HoodieInputFormatUtils.java | 8 +-
.../utils/HoodieRealtimeInputFormatUtils.java | 3 +-
.../hudi/common/util/ComparableVersion.java | 2 +-
.../apache/hudi/common/util/StringUtils.java | 2 +-
.../apache/hudi/io/hfile/HFileBlockType.java | 2 +-
.../apache/hudi/storage/StorageSchemes.java | 6 +-
.../kafka/KafkaConnectControlAgent.java | 2 +-
.../hudi/connect/utils/KafkaConnectUtils.java | 4 +-
.../view/HoodieMetaserverFileSystemView.java | 6 +-
.../client/HoodieMetaserverClientImp.java | 4 +-
.../java/org/apache/hudi/DataSourceUtils.java | 2 +-
.../internal/BaseWriterCommitMessage.java | 2 +-
.../DataSourceInternalWriterHelper.java | 2 +-
.../java/org/apache/hudi/sql/InsertMode.java | 2 +-
.../java/org/apache/hudi/QuickstartUtils.java | 31 +++--
.../hudi/cli/HDFSParquetImporterUtils.java | 4 +-
...k3HoodieVectorizedParquetRecordReader.java | 9 +-
.../hudi/hive/util/ColumnNameXLator.java | 6 +-
.../apache/hudi/hive/util/HiveSchemaUtil.java | 4 +-
.../apache/hudi/hive/SchemaDifference.java | 6 +-
.../hudi/sync/common/HoodieSyncConfig.java | 2 +-
.../hudi/sync/common/HoodieSyncException.java | 2 +-
.../common/metrics/HoodieMetaSyncMetrics.java | 2 +-
.../timeline/service/TimelineService.java | 6 +-
.../service/handlers/MarkerHandler.java | 2 +-
...cTimelineServerBasedDetectionStrategy.java | 2 +-
...erBasedEarlyConflictDetectionRunnable.java | 14 +-
.../handlers/marker/MarkerDirState.java | 4 +-
.../hudi/utilities/HDFSParquetImporter.java | 4 +-
.../apache/hudi/utilities/HoodieCleaner.java | 4 +-
.../hudi/utilities/HoodieCompactor.java | 2 +-
.../utilities/HoodieDataTableValidator.java | 4 +-
.../utilities/HoodieDropPartitionsTool.java | 6 +-
.../apache/hudi/utilities/HoodieIndexer.java | 2 +-
.../HoodieMetadataTableValidator.java | 21 ++-
.../hudi/utilities/HoodieRepairTool.java | 2 +-
.../apache/hudi/utilities/HoodieTTLJob.java | 2 +-
.../apache/hudi/utilities/TableSizeStats.java | 6 +-
.../kafka/HoodieWriteCommitKafkaCallback.java | 6 +-
.../KafkaConnectHdfsProvider.java | 2 +-
.../utilities/perf/TimelineServerPerf.java | 2 +-
.../schema/JdbcbasedSchemaProvider.java | 2 +-
.../apache/hudi/utilities/sources/Source.java | 2 +-
.../sources/debezium/DebeziumSource.java | 2 +-
.../sources/helpers/CloudDataFetcher.java | 8 +-
.../helpers/DatePartitionPathSelector.java | 2 +-
.../sources/helpers/KafkaOffsetGen.java | 8 +-
.../sources/helpers/gcs/MessageValidity.java | 2 +-
.../utilities/streamer/BootstrapExecutor.java | 10 +-
.../streamer/HoodieMultiTableStreamer.java | 22 ++--
.../utilities/streamer/HoodieStreamer.java | 8 +-
.../hudi/utilities/streamer/StreamSync.java | 18 +--
211 files changed, 609 insertions(+), 684 deletions(-)
diff --git a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
index 506023b22a7d3..91857edc1784d 100644
--- a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
+++ b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
@@ -713,10 +713,11 @@ public void managePartitionIndexes(String tableName) throws ExecutionException,
boolean indexesChanges = false;
for (PartitionIndexDescriptor existingIdx: existingIdxsResp.partitionIndexDescriptorList()) {
List<String> idxColumns = existingIdx.keys().stream().map(key -> key.name()).collect(Collectors.toList());
- Boolean toBeRemoved = true;
+ boolean toBeRemoved = true;
for (List<String> neededIdx : partitionsIndexNeeded) {
if (neededIdx.equals(idxColumns)) {
toBeRemoved = false;
+ break;
}
}
if (toBeRemoved) {
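
The hunk above swaps a boxed Boolean for a primitive flag and breaks out of the inner loop once a match is found. A minimal standalone sketch of that pattern (class and variable names here are invented, not Hudi's):

import java.util.Arrays;
import java.util.List;

public class IndexMatchExample {
  public static void main(String[] args) {
    // Hypothetical inputs standing in for the needed/existing partition indexes.
    List<List<String>> needed = Arrays.asList(
        Arrays.asList("year", "month"),
        Arrays.asList("region"));
    List<String> existing = Arrays.asList("region");

    // Primitive boolean avoids pointless boxing; break stops scanning as soon
    // as the existing index is known to still be needed.
    boolean toBeRemoved = true;
    for (List<String> idx : needed) {
      if (idx.equals(existing)) {
        toBeRemoved = false;
        break;
      }
    }
    System.out.println("remove index? " + toBeRemoved); // prints: remove index? false
  }
}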
diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java
index 3b42edc383a5a..535ff086e47f1 100644
--- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java
+++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CommitsCommand.java
@@ -25,9 +25,9 @@
import org.apache.hudi.common.model.HoodieCommitMetadata;
import org.apache.hudi.common.model.HoodieWriteStat;
import org.apache.hudi.common.table.HoodieTableMetaClient;
-import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.InstantComparator;
import org.apache.hudi.common.table.timeline.TimelineUtils;
import org.apache.hudi.common.util.NumericUtils;
@@ -194,7 +194,7 @@ public String showArchivedCommits(
HoodieArchivedTimeline archivedTimeline = HoodieCLI.getTableMetaClient().getArchivedTimeline();
try {
archivedTimeline.loadInstantDetailsInMemory(startTs, endTs);
- HoodieTimeline timelineRange = (HoodieTimeline)archivedTimeline.findInstantsInRange(startTs, endTs);
+ HoodieTimeline timelineRange = archivedTimeline.findInstantsInRange(startTs, endTs);
if (includeExtraMetadata) {
return printCommitsWithMetadata(timelineRange, limit, sortByField, descending, headerOnly, exportTableName, partition);
} else {
diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java
index f98ea7cecbb85..b37146b1c818e 100644
--- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java
+++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java
@@ -31,10 +31,10 @@
import org.apache.hudi.client.CompactionAdminClient.ValidationOpResult;
import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.common.table.HoodieTableMetaClient;
-import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.InstantGenerator;
import org.apache.hudi.common.table.timeline.TimelineMetadataUtils;
import org.apache.hudi.common.util.Option;
@@ -431,7 +431,7 @@ protected static String printCompaction(HoodieCompactionPlan compactionPlan,
}
private static String getTmpSerializerFile() {
- return TMP_DIR + UUID.randomUUID().toString() + ".ser";
+ return TMP_DIR + UUID.randomUUID() + ".ser";
}
private T deSerializeOperationResult(StoragePath inputPath,
diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/CLIUtils.java b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/CLIUtils.java
index 524778f8994b8..99310aaa3e7ac 100644
--- a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/CLIUtils.java
+++ b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/CLIUtils.java
@@ -21,10 +21,10 @@
import org.apache.hudi.cli.HoodieCLI;
import org.apache.hudi.common.table.HoodieTableMetaClient;
-import org.apache.hudi.common.table.timeline.HoodieTimeline;
+import org.apache.hudi.common.table.timeline.BaseHoodieTimeline;
import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline;
-import org.apache.hudi.common.table.timeline.BaseHoodieTimeline;
+import org.apache.hudi.common.table.timeline.HoodieTimeline;
import static org.apache.hudi.cli.utils.CommitUtil.getTimeDaysAgo;
import static org.apache.hudi.common.util.StringUtils.isNullOrEmpty;
@@ -57,7 +57,7 @@ public static HoodieTimeline getTimelineInRange(String startTs, String endTs, bo
if (includeArchivedTimeline) {
HoodieArchivedTimeline archivedTimeline = metaClient.getArchivedTimeline();
archivedTimeline.loadInstantDetailsInMemory(startTs, endTs);
- return ((HoodieTimeline)archivedTimeline.findInstantsInRange(startTs, endTs)).mergeTimeline(activeTimeline);
+ return archivedTimeline.findInstantsInRange(startTs, endTs).mergeTimeline(activeTimeline);
}
return activeTimeline;
}
diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java
index 5209465d8a930..405d4aebb3966 100644
--- a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java
+++ b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/InputStreamConsumer.java
@@ -32,7 +32,7 @@
public class InputStreamConsumer extends Thread {
private static final Logger LOG = LoggerFactory.getLogger(InputStreamConsumer.class);
- private InputStream is;
+ private final InputStream is;
public InputStreamConsumer(InputStream is) {
this.is = is;
diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkTempViewProvider.java b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkTempViewProvider.java
index 0ce6002c5cb71..c4aa1a327ef85 100644
--- a/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkTempViewProvider.java
+++ b/hudi-cli/src/main/java/org/apache/hudi/cli/utils/SparkTempViewProvider.java
@@ -40,8 +40,8 @@ public class SparkTempViewProvider implements TempViewProvider {
private static final Logger LOG = LoggerFactory.getLogger(SparkTempViewProvider.class);
- private JavaSparkContext jsc;
- private SQLContext sqlContext;
+ private final JavaSparkContext jsc;
+ private final SQLContext sqlContext;
public SparkTempViewProvider(String appName) {
try {
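
Many hunks in this patch simply mark fields final when they are assigned exactly once in the constructor, so the compiler enforces what the code already assumed. A small sketch of the pattern, loosely modeled on the InputStreamConsumer change above (the class name here is invented):

import java.io.InputStream;

public class StreamConsumer extends Thread {
  // final: assigned once in the constructor and never reassigned; any later
  // accidental reassignment now fails at compile time.
  private final InputStream is;

  public StreamConsumer(InputStream is) {
    this.is = is;
  }

  @Override
  public void run() {
    // consume 'is' here
  }

  public static void main(String[] args) {
    new StreamConsumer(System.in).start();
  }
}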
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncClusteringService.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncClusteringService.java
index 7b3884235010c..ea3983e56ee8a 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncClusteringService.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncClusteringService.java
@@ -47,7 +47,7 @@ public abstract class AsyncClusteringService extends HoodieAsyncTableService {
private static final Logger LOG = LoggerFactory.getLogger(AsyncClusteringService.class);
private final int maxConcurrentClustering;
protected transient HoodieEngineContext context;
- private transient BaseClusterer clusteringClient;
+ private final transient BaseClusterer clusteringClient;
public AsyncClusteringService(HoodieEngineContext context, BaseHoodieWriteClient writeClient) {
this(context, writeClient, false);
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncCompactService.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncCompactService.java
index 1d1b9421b5964..218391eccbcbc 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncCompactService.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncCompactService.java
@@ -47,7 +47,7 @@ public abstract class AsyncCompactService extends HoodieAsyncTableService {
private static final Logger LOG = LoggerFactory.getLogger(AsyncCompactService.class);
private final int maxConcurrentCompaction;
protected transient HoodieEngineContext context;
- private transient BaseCompactor compactor;
+ private final transient BaseCompactor compactor;
public AsyncCompactService(HoodieEngineContext context, BaseHoodieWriteClient client) {
this(context, client, false);
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/HoodieAsyncService.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/HoodieAsyncService.java
index 989babfdcb7a1..410c04e83cb3e 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/HoodieAsyncService.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/HoodieAsyncService.java
@@ -57,11 +57,11 @@ public abstract class HoodieAsyncService implements Serializable {
// Run in daemon mode
private final boolean runInDaemonMode;
// Queue to hold pending compaction/clustering instants
- private transient BlockingQueue pendingInstants = new LinkedBlockingQueue<>();
+ private final transient BlockingQueue pendingInstants = new LinkedBlockingQueue<>();
// Mutex lock for synchronized access to pendingInstants queue
- private transient ReentrantLock queueLock = new ReentrantLock();
+ private final transient ReentrantLock queueLock = new ReentrantLock();
// Condition instance to use with the queueLock
- private transient Condition consumed = queueLock.newCondition();
+ private final transient Condition consumed = queueLock.newCondition();
protected HoodieAsyncService() {
this(false);
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/callback/util/HoodieWriteCommitCallbackUtil.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/callback/util/HoodieWriteCommitCallbackUtil.java
index fff0b713528be..cd05b78dfcf2b 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/callback/util/HoodieWriteCommitCallbackUtil.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/callback/util/HoodieWriteCommitCallbackUtil.java
@@ -28,14 +28,14 @@
*/
public class HoodieWriteCommitCallbackUtil {
- private static ObjectMapper mapper = new ObjectMapper();
+ private static final ObjectMapper MAPPER = new ObjectMapper();
/**
* Convert data to json string format.
*/
public static String convertToJsonString(Object obj) {
try {
- return mapper.writeValueAsString(obj);
+ return MAPPER.writeValueAsString(obj);
} catch (IOException e) {
throw new HoodieCommitCallbackException("Callback service convert data to json failed", e);
}
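
The change above also renames the shared ObjectMapper to MAPPER, since checkstyle expects static final constants in upper snake case. A self-contained sketch of the same pattern, assuming Jackson is on the classpath (the utility class and exception choice are illustrative only):

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

public final class JsonUtil {
  // static final: one immutable, thread-safe mapper shared by all callers,
  // named in UPPER_SNAKE_CASE as checkstyle expects for constants.
  private static final ObjectMapper MAPPER = new ObjectMapper();

  private JsonUtil() {
  }

  public static String toJson(Object obj) {
    try {
      return MAPPER.writeValueAsString(obj);
    } catch (JsonProcessingException e) {
      throw new IllegalStateException("Failed to serialize object to JSON", e);
    }
  }

  public static void main(String[] args) {
    System.out.println(toJson(java.util.Collections.singletonMap("key", "value")));
  }
}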
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/WriteStatus.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/WriteStatus.java
index eac71cba191c4..e9ab4c10d56d9 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/WriteStatus.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/WriteStatus.java
@@ -259,14 +259,12 @@ public boolean isTrackingSuccessfulWrites() {
@Override
public String toString() {
- final StringBuilder sb = new StringBuilder("WriteStatus {");
- sb.append("fileId=").append(fileId);
- sb.append(", writeStat=").append(stat);
- sb.append(", globalError='").append(globalError).append('\'');
- sb.append(", hasErrors='").append(hasErrors()).append('\'');
- sb.append(", errorCount='").append(totalErrorRecords).append('\'');
- sb.append(", errorPct='").append((100.0 * totalErrorRecords) / totalRecords).append('\'');
- sb.append('}');
- return sb.toString();
+ return "WriteStatus {" + "fileId=" + fileId
+ + ", writeStat=" + stat
+ + ", globalError='" + globalError + '\''
+ + ", hasErrors='" + hasErrors() + '\''
+ + ", errorCount='" + totalErrorRecords + '\''
+ + ", errorPct='" + (100.0 * totalErrorRecords) / totalRecords + '\''
+ + '}';
}
}
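
Several toString() hunks in this patch replace hand-rolled StringBuilder chains with a single concatenation expression; the compiler generates equivalent StringBuilder (or invokedynamic) code, so only readability changes. A tiny sketch with made-up fields:

public class WriteSummary {
  private final String fileId;
  private final long errorCount;
  private final long totalRecords;

  public WriteSummary(String fileId, long errorCount, long totalRecords) {
    this.fileId = fileId;
    this.errorCount = errorCount;
    this.totalRecords = totalRecords;
  }

  @Override
  public String toString() {
    // One concatenation expression: javac compiles this to a single
    // StringBuilder chain, so there is no performance cost.
    return "WriteSummary {" + "fileId=" + fileId
        + ", errorCount='" + errorCount + '\''
        + ", errorPct='" + ((100.0 * errorCount) / totalRecords) + '\''
        + '}';
  }

  public static void main(String[] args) {
    System.out.println(new WriteSummary("f1", 2, 100));
  }
}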
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/FileSystemBasedLockProvider.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/FileSystemBasedLockProvider.java
index 8c0bc8842b919..7193e6234e55c 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/FileSystemBasedLockProvider.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/FileSystemBasedLockProvider.java
@@ -67,8 +67,8 @@ public class FileSystemBasedLockProvider implements LockProvider, Serial
private final transient HoodieStorage storage;
private final transient StoragePath lockFile;
protected LockConfiguration lockConfiguration;
- private SimpleDateFormat sdf;
- private LockInfo lockInfo;
+ private final SimpleDateFormat sdf;
+ private final LockInfo lockInfo;
private String currentOwnerLockInfo;
public FileSystemBasedLockProvider(final LockConfiguration lockConfiguration, final StorageConfiguration> configuration) {
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/LockManager.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/LockManager.java
index 57ed6df45afb4..bf751e4037b2a 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/LockManager.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/LockManager.java
@@ -53,7 +53,7 @@ public class LockManager implements Serializable, AutoCloseable {
private final int maxRetries;
private final long maxWaitTimeInMs;
private final RetryHelper lockRetryHelper;
- private transient HoodieLockMetrics metrics;
+ private final transient HoodieLockMetrics metrics;
private volatile LockProvider lockProvider;
public LockManager(HoodieWriteConfig writeConfig, HoodieStorage storage) {
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
index 385532917c498..931d50aeb5c05 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
@@ -524,7 +524,7 @@ public class HoodieIndexConfig extends HoodieConfig {
@Deprecated
public static final String DEFAULT_SIMPLE_INDEX_UPDATE_PARTITION_PATH = SIMPLE_INDEX_UPDATE_PARTITION_PATH_ENABLE.defaultValue();
- private EngineType engineType;
+ private final EngineType engineType;
/**
* Use Spark engine by default.
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
index c7e14b6b4e1b9..b6977d6324caa 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
@@ -835,7 +835,7 @@ public class HoodieWriteConfig extends HoodieConfig {
private HoodieStorageConfig storageConfig;
private HoodieTimeGeneratorConfig timeGeneratorConfig;
private HoodieIndexingConfig indexingConfig;
- private EngineType engineType;
+ private final EngineType engineType;
/**
* @deprecated Use {@link #TBL_NAME} and its methods instead
@@ -2801,7 +2801,7 @@ public static class Builder {
private boolean isCleanConfigSet = false;
private boolean isArchivalConfigSet = false;
private boolean isClusteringConfigSet = false;
- private boolean isOptimizeConfigSet = false;
+ private final boolean isOptimizeConfigSet = false;
private boolean isMetricsConfigSet = false;
private boolean isBootstrapConfigSet = false;
private boolean isMemoryConfigSet = false;
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java
index 11ffb785f014e..2d6808c81dbe3 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bloom/BloomIndexFileInfo.java
@@ -90,11 +90,9 @@ public int hashCode() {
@Override
public String toString() {
- final StringBuilder sb = new StringBuilder("BloomIndexFileInfo {");
- sb.append(" fileId=").append(fileId);
- sb.append(" minRecordKey=").append(minRecordKey);
- sb.append(" maxRecordKey=").append(maxRecordKey);
- sb.append('}');
- return sb.toString();
+ return "BloomIndexFileInfo {" + " fileId=" + fileId
+ + " minRecordKey=" + minRecordKey
+ + " maxRecordKey=" + maxRecordKey
+ + '}';
}
}
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java
index ef17716637c26..a9989b21440f4 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/hbase/DefaultHBaseQPSResourceAllocator.java
@@ -24,7 +24,7 @@
import org.slf4j.LoggerFactory;
public class DefaultHBaseQPSResourceAllocator implements HBaseIndexQPSResourceAllocator {
- private HoodieWriteConfig hoodieWriteConfig;
+ private final HoodieWriteConfig hoodieWriteConfig;
private static final Logger LOG = LoggerFactory.getLogger(DefaultHBaseQPSResourceAllocator.class);
public DefaultHBaseQPSResourceAllocator(HoodieWriteConfig hoodieWriteConfig) {
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java
index 15a39249d21fa..73411a3885aec 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metrics/HoodieMetrics.java
@@ -91,8 +91,8 @@ public class HoodieMetrics {
private String conflictResolutionFailureCounterName = null;
private String compactionRequestedCounterName = null;
private String compactionCompletedCounterName = null;
- private HoodieWriteConfig config;
- private String tableName;
+ private final HoodieWriteConfig config;
+ private final String tableName;
private Timer rollbackTimer = null;
private Timer cleanTimer = null;
private Timer archiveTimer = null;
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadProfile.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadProfile.java
index 8e6160b095483..fb831fb49b7b5 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadProfile.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadProfile.java
@@ -114,12 +114,10 @@ public WriteOperationType getOperationType() {
@Override
public String toString() {
- final StringBuilder sb = new StringBuilder("WorkloadProfile {");
- sb.append("globalStat=").append(globalStat).append(", ");
- sb.append("InputPartitionStat=").append(inputPartitionPathStatMap).append(", ");
- sb.append("OutputPartitionStat=").append(outputPartitionPathStatMap).append(", ");
- sb.append("operationType=").append(operationType);
- sb.append('}');
- return sb.toString();
+ return "WorkloadProfile {" + "globalStat=" + globalStat + ", "
+ + "InputPartitionStat=" + inputPartitionPathStatMap + ", "
+ + "OutputPartitionStat=" + outputPartitionPathStatMap + ", "
+ + "operationType=" + operationType
+ + '}';
}
}
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadStat.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadStat.java
index 327a5a3ae7980..716dce473aefc 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadStat.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/WorkloadStat.java
@@ -33,9 +33,9 @@ public class WorkloadStat implements Serializable {
private long numUpdates = 0L;
- private HashMap> insertLocationToCount;
+ private final HashMap> insertLocationToCount;
- private HashMap> updateLocationToCount;
+ private final HashMap> updateLocationToCount;
public WorkloadStat() {
insertLocationToCount = new HashMap<>();
@@ -86,10 +86,8 @@ public HashMap> getInsertLocationToCount() {
@Override
public String toString() {
- final StringBuilder sb = new StringBuilder("WorkloadStat {");
- sb.append("numInserts=").append(numInserts).append(", ");
- sb.append("numUpdates=").append(numUpdates);
- sb.append('}');
- return sb.toString();
+ return "WorkloadStat {" + "numInserts=" + numInserts + ", "
+ + "numUpdates=" + numUpdates
+ + '}';
}
}
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java
index 893dfe8548a4a..07c622da337db 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/clean/CleanPlanner.java
@@ -89,7 +89,7 @@ public class CleanPlanner implements Serializable {
private final Map fgIdToPendingLogCompactionOperations;
private final HoodieTable hoodieTable;
private final HoodieWriteConfig config;
- private transient HoodieEngineContext context;
+ private final transient HoodieEngineContext context;
private final List savepointedTimestamps;
private Option earliestCommitToRetain = Option.empty();
@@ -237,10 +237,7 @@ private boolean isAnySavepointDeleted(HoodieCleanMetadata cleanMetadata) {
// check for any savepointed removed in latest compared to previous saved list
List removedSavepointedTimestamps = new ArrayList<>(savepointedTimestampsFromLastClean);
removedSavepointedTimestamps.removeAll(savepointedTimestamps);
- if (removedSavepointedTimestamps.isEmpty()) {
- return false;
- }
- return true;
+ return !removedSavepointedTimestamps.isEmpty();
}
/**
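
The CleanPlanner hunk condenses an if (cond) return false; return true; block into return !cond;. A minimal illustration of that simplification with hypothetical savepoint lists:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SavepointDiffExample {
  // Returns true when at least one previously tracked savepoint is gone.
  static boolean anySavepointDeleted(List<String> previous, List<String> current) {
    List<String> removed = new ArrayList<>(previous);
    removed.removeAll(current);
    // Instead of: if (removed.isEmpty()) { return false; } return true;
    return !removed.isEmpty();
  }

  public static void main(String[] args) {
    System.out.println(anySavepointDeleted(
        Arrays.asList("001", "002"), Arrays.asList("002"))); // true
  }
}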
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/cluster/strategy/ClusteringPlanStrategy.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/cluster/strategy/ClusteringPlanStrategy.java
index a6894388f6d2f..4d1aa91458ebd 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/cluster/strategy/ClusteringPlanStrategy.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/cluster/strategy/ClusteringPlanStrategy.java
@@ -151,13 +151,16 @@ protected int getPlanVersion() {
* Transform {@link FileSlice} to {@link HoodieSliceInfo}.
*/
protected static List getFileSliceInfo(List slices) {
- return slices.stream().map(slice -> new HoodieSliceInfo().newBuilder()
- .setPartitionPath(slice.getPartitionPath())
- .setFileId(slice.getFileId())
- .setDataFilePath(slice.getBaseFile().map(BaseFile::getPath).orElse(StringUtils.EMPTY_STRING))
- .setDeltaFilePaths(slice.getLogFiles().map(f -> f.getPath().toString()).collect(Collectors.toList()))
- .setBootstrapFilePath(slice.getBaseFile().map(bf -> bf.getBootstrapBaseFile().map(bbf -> bbf.getPath()).orElse(StringUtils.EMPTY_STRING)).orElse(StringUtils.EMPTY_STRING))
- .build()).collect(Collectors.toList());
+ return slices.stream().map(slice -> {
+ new HoodieSliceInfo();
+ return HoodieSliceInfo.newBuilder()
+ .setPartitionPath(slice.getPartitionPath())
+ .setFileId(slice.getFileId())
+ .setDataFilePath(slice.getBaseFile().map(BaseFile::getPath).orElse(StringUtils.EMPTY_STRING))
+ .setDeltaFilePaths(slice.getLogFiles().map(f -> f.getPath().toString()).collect(Collectors.toList()))
+ .setBootstrapFilePath(slice.getBaseFile().map(bf -> bf.getBootstrapBaseFile().map(bbf -> bbf.getPath()).orElse(StringUtils.EMPTY_STRING)).orElse(StringUtils.EMPTY_STRING))
+ .build();
+ }).collect(Collectors.toList());
}
/**
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java
index 6547da6425460..898522b8b4437 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java
@@ -50,12 +50,11 @@ public String getPartitionPath() {
@Override
public String toString() {
- final StringBuilder sb = new StringBuilder("BucketInfo {");
- sb.append("bucketType=").append(bucketType).append(", ");
- sb.append("fileIdPrefix=").append(fileIdPrefix).append(", ");
- sb.append("partitionPath=").append(partitionPath);
- sb.append('}');
- return sb.toString();
+ String sb = "BucketInfo {" + "bucketType=" + bucketType + ", "
+ + "fileIdPrefix=" + fileIdPrefix + ", "
+ + "partitionPath=" + partitionPath
+ + '}';
+ return sb;
}
@Override
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/InsertBucket.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/InsertBucket.java
index 2cedbe8658815..8861621ff96ee 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/InsertBucket.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/InsertBucket.java
@@ -32,10 +32,8 @@ public class InsertBucket implements Serializable {
@Override
public String toString() {
- final StringBuilder sb = new StringBuilder("InsertBucket {");
- sb.append("bucketNumber=").append(bucketNumber).append(", ");
- sb.append("weight=").append(weight);
- sb.append('}');
- return sb.toString();
+ return "InsertBucket {" + "bucketNumber=" + bucketNumber + ", "
+ + "weight=" + weight
+ + '}';
}
}
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java
index e495d28e10bda..9be817d96fb46 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java
@@ -32,10 +32,9 @@ public class SmallFile implements Serializable {
@Override
public String toString() {
- final StringBuilder sb = new StringBuilder("SmallFile {");
- sb.append("location=").append(location).append(", ");
- sb.append("sizeBytes=").append(sizeBytes);
- sb.append('}');
- return sb.toString();
+ String sb = "SmallFile {" + "location=" + location + ", "
+ + "sizeBytes=" + sizeBytes
+ + '}';
+ return sb;
}
}
\ No newline at end of file
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java
index 276bec1b9bb14..7ad9df2a58b0a 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java
@@ -56,7 +56,7 @@ public class RunCompactionActionExecutor extends
private final HoodieCompactor compactor;
private final HoodieCompactionHandler compactionHandler;
- private WriteOperationType operationType;
+ private final WriteOperationType operationType;
private final HoodieMetrics metrics;
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/ScheduleCompactionActionExecutor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/ScheduleCompactionActionExecutor.java
index e83800e45daaf..302d9069b2352 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/ScheduleCompactionActionExecutor.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/ScheduleCompactionActionExecutor.java
@@ -53,7 +53,7 @@
public class ScheduleCompactionActionExecutor extends BaseActionExecutor> {
private static final Logger LOG = LoggerFactory.getLogger(ScheduleCompactionActionExecutor.class);
- private WriteOperationType operationType;
+ private final WriteOperationType operationType;
private final Option
*/
public class MurmurHash extends Hash {
- private static MurmurHash _instance = new MurmurHash();
+ private static final MurmurHash INSTANCE = new MurmurHash();
public static Hash getInstance() {
- return _instance;
+ return INSTANCE;
}
@Override
@@ -56,7 +56,7 @@ public int hash(byte[] data, int offset, int length, int seed) {
k = k << 8;
k = k | (data[i4 + 1] & 0xff);
k = k << 8;
- k = k | (data[i4 + 0] & 0xff);
+ k = k | (data[i4] & 0xff);
k *= m;
k ^= k >>> r;
k *= m;
@@ -77,7 +77,7 @@ public int hash(byte[] data, int offset, int length, int seed) {
h ^= (int) data[length - 2] << 8;
}
if (left >= 1) {
- h ^= (int) data[length - 1];
+ h ^= data[length - 1];
}
h *= m;
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/DisruptorMessageQueue.java b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/DisruptorMessageQueue.java
index ae41c2a3a937b..606c188986cdc 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/util/queue/DisruptorMessageQueue.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/util/queue/DisruptorMessageQueue.java
@@ -48,7 +48,7 @@ public class DisruptorMessageQueue implements HoodieMessageQueue {
private final Disruptor queue;
private final Function transformFunction;
private final RingBuffer ringBuffer;
- private AtomicReference throwable = new AtomicReference<>(null);
+ private final AtomicReference throwable = new AtomicReference<>(null);
private boolean isShutdown = false;
private boolean isStarted = false;
diff --git a/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsPrometheusConfig.java b/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsPrometheusConfig.java
index b0a019f811069..2340dd5091698 100644
--- a/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsPrometheusConfig.java
+++ b/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsPrometheusConfig.java
@@ -182,7 +182,7 @@ public static HoodieMetricsPrometheusConfig.Builder newBuilder() {
public static class Builder {
- private HoodieMetricsPrometheusConfig hoodieMetricsPrometheusConfig = new HoodieMetricsPrometheusConfig();
+ private final HoodieMetricsPrometheusConfig hoodieMetricsPrometheusConfig = new HoodieMetricsPrometheusConfig();
public Builder fromProperties(Properties props) {
this.hoodieMetricsPrometheusConfig.getProps().putAll(props);
diff --git a/hudi-common/src/main/java/org/apache/hudi/expression/Predicates.java b/hudi-common/src/main/java/org/apache/hudi/expression/Predicates.java
index 11c4f39507f1b..49c04de93a18e 100644
--- a/hudi-common/src/main/java/org/apache/hudi/expression/Predicates.java
+++ b/hudi-common/src/main/java/org/apache/hudi/expression/Predicates.java
@@ -166,11 +166,7 @@ public Boolean eval(StructLike data) {
if (right != null && !(Boolean) right) {
return false;
} else {
- if (left != null && right != null) {
- return true;
- } else {
- return false;
- }
+ return left != null && right != null;
}
}
}
diff --git a/hudi-common/src/main/java/org/apache/hudi/index/secondary/HoodieSecondaryIndex.java b/hudi-common/src/main/java/org/apache/hudi/index/secondary/HoodieSecondaryIndex.java
index 8e9a7e01a1621..c75da2b74ccaa 100644
--- a/hudi-common/src/main/java/org/apache/hudi/index/secondary/HoodieSecondaryIndex.java
+++ b/hudi-common/src/main/java/org/apache/hudi/index/secondary/HoodieSecondaryIndex.java
@@ -80,7 +80,6 @@ private void validate() {
}
break;
default:
- return;
}
}
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
index 7978c21e54c68..7674caf971bc1 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/InternalSchemaBuilder.java
@@ -170,7 +170,6 @@ private void visitIdToField(Type type, Map index) {
}
return;
default:
- return;
}
}
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java
index ed03a7349cb72..ac0768897584c 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/Types.java
@@ -23,6 +23,7 @@
import java.io.Serializable;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@@ -650,7 +651,7 @@ public Field field(int id) {
@Override
public List fields() {
- return Arrays.asList(elementField);
+ return Collections.singletonList(elementField);
}
public int elementId() {
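
Collections.singletonList is preferred over Arrays.asList for a single element: it allocates one small immutable wrapper instead of a varargs array plus a list view. A quick sketch of the difference:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SingletonListExample {
  public static void main(String[] args) {
    // Arrays.asList allocates a varargs array plus a fixed-size list view.
    List<String> viaArrays = Arrays.asList("element");
    // Collections.singletonList allocates a single immutable holder object.
    List<String> viaSingleton = Collections.singletonList("element");

    System.out.println(viaArrays.equals(viaSingleton)); // true: same contents
    try {
      viaSingleton.set(0, "other"); // singletonList rejects mutation
    } catch (UnsupportedOperationException e) {
      System.out.println("singletonList is immutable");
    }
  }
}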
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/InternalSchemaChangeApplier.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/InternalSchemaChangeApplier.java
index 36aac462a137e..b4b404e035183 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/InternalSchemaChangeApplier.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/InternalSchemaChangeApplier.java
@@ -28,7 +28,7 @@
* Manage schema change for HoodieWriteClient.
*/
public class InternalSchemaChangeApplier {
- private InternalSchema latestSchema;
+ private final InternalSchema latestSchema;
public InternalSchemaChangeApplier(InternalSchema latestSchema) {
this.latestSchema = latestSchema;
@@ -75,7 +75,7 @@ public InternalSchema applyAddChange(
throw new IllegalArgumentException(String.format("only support first/before/after but found: %s", positionType));
}
} else {
- throw new IllegalArgumentException(String.format("positionType should be specified"));
+ throw new IllegalArgumentException("positionType should be specified");
}
return SchemaChangeUtils.applyTableChanges2Schema(latestSchema, add);
}
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
index 35b3c781b4ee9..e9932266b6579 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/action/TableChange.java
@@ -42,9 +42,9 @@ public interface TableChange {
*/
enum ColumnChangeID {
ADD, UPDATE, DELETE, PROPERTY_CHANGE, REPLACE;
- private String name;
+ private final String name;
- private ColumnChangeID() {
+ ColumnChangeID() {
this.name = this.name().toLowerCase(Locale.ROOT);
}
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java
index 94e72ff7180ed..c1fbc902f758c 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/InternalSchemaUtils.java
@@ -90,7 +90,7 @@ public static InternalSchema pruneInternalSchemaByID(InternalSchema schema, List
if (f != null) {
newFields.add(f);
} else {
- throw new HoodieSchemaException(String.format("cannot find pruned id %s in currentSchema %s", id, schema.toString()));
+ throw new HoodieSchemaException(String.format("cannot find pruned id %s in currentSchema %s", id, schema));
}
}
}
@@ -111,10 +111,8 @@ private static Type pruneType(Type type, List fieldIds) {
Type newType = pruneType(f.type(), fieldIds);
if (fieldIds.contains(f.fieldId())) {
newTypes.add(f.type());
- } else if (newType != null) {
- newTypes.add(newType);
} else {
- newTypes.add(null);
+ newTypes.add(newType);
}
}
boolean changed = false;
diff --git a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java
index 7891fc4582cd9..35545f16cdf37 100644
--- a/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java
+++ b/hudi-common/src/main/java/org/apache/hudi/internal/schema/utils/SerDeHelper.java
@@ -33,7 +33,7 @@
import java.io.IOException;
import java.io.StringWriter;
import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
@@ -341,7 +341,7 @@ public static String inheritSchemas(InternalSchema newSchema, String oldSchemas)
return "";
}
if (oldSchemas == null || oldSchemas.isEmpty()) {
- return toJson(Arrays.asList(newSchema));
+ return toJson(Collections.singletonList(newSchema));
}
String checkedString = "{\"schemas\":[";
if (!oldSchemas.startsWith("{\"schemas\":")) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
index 27ea15bc2509f..2da11bea079df 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java
@@ -829,9 +829,9 @@ public static HoodieData convertMetadataToRecordIndexRecords(Hoodi
return deletedRecordKeys.stream().map(recordKey -> HoodieMetadataPayload.createRecordIndexDelete(recordKey)).collect(toList()).iterator();
}
// ignore log file data blocks.
- return new ArrayList().iterator();
+ return Collections.emptyIterator();
} else {
- throw new HoodieIOException("Unsupported file type " + fullFilePath.toString() + " while generating MDT records");
+ throw new HoodieIOException("Unsupported file type " + fullFilePath + " while generating MDT records");
}
});
@@ -933,7 +933,7 @@ public static List getRecordKeysDeletedOrUpdated(HoodieEngineContext eng
return getRecordKeys(fullFilePath.toString(), dataTableMetaClient, finalWriterSchemaOpt, maxBufferSize, instantTime, true, true)
.iterator();
} else {
- throw new HoodieIOException("Found unsupported file type " + fullFilePath.toString() + ", while generating MDT records");
+ throw new HoodieIOException("Found unsupported file type " + fullFilePath + ", while generating MDT records");
}
}).collectAsList();
} catch (Exception e) {
@@ -2662,7 +2662,7 @@ public static HoodieMetadataColumnStats mergeColumnStatsRecords(HoodieMetadataCo
Comparable minValue =
(Comparable) Stream.of(
- (Comparable) unwrapAvroValueWrapper(prevColumnStats.getMinValue()),
+ unwrapAvroValueWrapper(prevColumnStats.getMinValue()),
(Comparable) unwrapAvroValueWrapper(newColumnStats.getMinValue()))
.filter(Objects::nonNull)
.min(Comparator.naturalOrder())
@@ -2670,7 +2670,7 @@ public static HoodieMetadataColumnStats mergeColumnStatsRecords(HoodieMetadataCo
Comparable maxValue =
(Comparable) Stream.of(
- (Comparable) unwrapAvroValueWrapper(prevColumnStats.getMaxValue()),
+ unwrapAvroValueWrapper(prevColumnStats.getMaxValue()),
(Comparable) unwrapAvroValueWrapper(newColumnStats.getMaxValue()))
.filter(Objects::nonNull)
.max(Comparator.naturalOrder())
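
Similarly, the metadata-util hunk replaces new ArrayList().iterator() with Collections.emptyIterator(), which returns a shared, allocation-free iterator. A minimal sketch with a hypothetical helper:

import java.util.Collections;
import java.util.Iterator;

public class EmptyIteratorExample {
  // Hypothetical helper: yields no records when a file type is skipped.
  static Iterator<String> recordsFor(boolean supported) {
    if (!supported) {
      // No ArrayList allocation; the same immutable empty iterator is reused.
      return Collections.emptyIterator();
    }
    return Collections.singletonList("record-1").iterator();
  }

  public static void main(String[] args) {
    System.out.println(recordsFor(false).hasNext()); // false
    System.out.println(recordsFor(true).next());     // record-1
  }
}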
diff --git a/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java b/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java
index e3acab9a90b9d..161536e19cc5e 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metrics/MetricsGraphiteReporter.java
@@ -39,8 +39,8 @@ public class MetricsGraphiteReporter extends MetricsReporter {
private final MetricRegistry registry;
private final GraphiteReporter graphiteReporter;
private final HoodieMetricsConfig metricsConfig;
- private String serverHost;
- private int serverPort;
+ private final String serverHost;
+ private final int serverPort;
private final int periodSeconds;
public MetricsGraphiteReporter(HoodieMetricsConfig metricsConfig, MetricRegistry registry) {
diff --git a/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java b/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java
index 13574b1e15693..6922393f39aa4 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metrics/custom/CustomizableMetricsReporter.java
@@ -28,8 +28,8 @@
* Extensible metrics reporter for custom implementation.
*/
public abstract class CustomizableMetricsReporter extends MetricsReporter {
- private Properties props;
- private MetricRegistry registry;
+ private final Properties props;
+ private final MetricRegistry registry;
public CustomizableMetricsReporter(Properties props, MetricRegistry registry) {
this.props = props;
diff --git a/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java b/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java
index 32b0ee809f934..1f3e2a4ca4a85 100644
--- a/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java
+++ b/hudi-common/src/main/java/org/apache/hudi/metrics/datadog/DatadogReporter.java
@@ -173,6 +173,6 @@ String build() {
}
enum MetricType {
- gauge;
+ gauge
}
}
diff --git a/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/QuickstartConfigurations.java b/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/QuickstartConfigurations.java
index 47a5009ade59f..ca75d8db11719 100644
--- a/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/QuickstartConfigurations.java
+++ b/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/QuickstartConfigurations.java
@@ -113,12 +113,11 @@ public static String getCreateHoodieTableDDL(
}
public static String getCreateHudiCatalogDDL(final String catalogName, final String catalogPath) {
- StringBuilder builder = new StringBuilder();
- builder.append("create catalog ").append(catalogName).append(" with (\n");
- builder.append(" 'type' = 'hudi',\n"
- + " 'catalog.path' = '").append(catalogPath).append("'");
- builder.append("\n)");
- return builder.toString();
+ return "create catalog " + catalogName + " with (\n"
+ + " 'type' = 'hudi',\n"
+ + " 'catalog.path' = '"
+ + catalogPath + "'"
+ + "\n)";
}
public static String getFileSourceDDL(String tableName) {
@@ -175,8 +174,7 @@ public static String getCollectSinkDDL(String tableName, ResolvedSchema tableSch
}
builder.append("\n");
}
- final String withProps = ""
- + ") with (\n"
+ final String withProps = ") with (\n"
+ " 'connector' = '" + CollectSinkTableFactory.FACTORY_ID + "'\n"
+ ")";
builder.append(withProps);
diff --git a/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/SchemaBuilder.java b/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/SchemaBuilder.java
index 76306f780646d..ae814acea1f5d 100644
--- a/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/SchemaBuilder.java
+++ b/hudi-examples/hudi-examples-flink/src/main/java/org/apache/hudi/examples/quickstart/utils/SchemaBuilder.java
@@ -34,8 +34,8 @@
* Builder for {@link ResolvedSchema}.
*/
public class SchemaBuilder {
- private List columns;
- private List watermarkSpecs;
+ private final List columns;
+ private final List watermarkSpecs;
private UniqueConstraint constraint;
public static SchemaBuilder instance() {
diff --git a/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieSparkBootstrapExample.java b/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieSparkBootstrapExample.java
index f1529b6e03a28..31f93601f9275 100644
--- a/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieSparkBootstrapExample.java
+++ b/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieSparkBootstrapExample.java
@@ -34,7 +34,7 @@
public class HoodieSparkBootstrapExample {
- private static String tableType = HoodieTableType.MERGE_ON_READ.name();
+ private static final String TABLE_TYPE = HoodieTableType.MERGE_ON_READ.name();
public static void main(String[] args) throws Exception {
if (args.length < 5) {
diff --git a/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieWriteClientExample.java b/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieWriteClientExample.java
index b8df6161ca454..457020036e7cf 100644
--- a/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieWriteClientExample.java
+++ b/hudi-examples/hudi-examples-spark/src/main/java/org/apache/hudi/examples/spark/HoodieWriteClientExample.java
@@ -66,7 +66,7 @@ public class HoodieWriteClientExample {
private static final Logger LOG = LoggerFactory.getLogger(HoodieWriteClientExample.class);
- private static String tableType = HoodieTableType.COPY_ON_WRITE.name();
+ private static final String TABLE_TYPE = HoodieTableType.COPY_ON_WRITE.name();
public static void main(String[] args) throws Exception {
if (args.length < 2) {
@@ -87,7 +87,7 @@ public static void main(String[] args) throws Exception {
FileSystem fs = HadoopFSUtils.getFs(tablePath, jsc.hadoopConfiguration());
if (!fs.exists(path)) {
HoodieTableMetaClient.newTableBuilder()
- .setTableType(tableType)
+ .setTableType(TABLE_TYPE)
.setTableName(tableName)
.setPayloadClass(HoodieAvroPayload.class)
.initTable(HadoopFSUtils.getStorageConfWithCopy(jsc.hadoopConfiguration()), tablePath);
@@ -139,7 +139,7 @@ public static void main(String[] args) throws Exception {
client.deletePartitions(deleteList, newCommitTime);
// compaction
- if (HoodieTableType.valueOf(tableType) == HoodieTableType.MERGE_ON_READ) {
+ if (HoodieTableType.valueOf(TABLE_TYPE) == HoodieTableType.MERGE_ON_READ) {
Option instant = client.scheduleCompaction(Option.empty());
HoodieWriteMetadata> compactionMetadata = client.compact(instant.get());
client.commitCompaction(instant.get(), compactionMetadata.getCommitMetadata().get(), Option.empty());
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/bulk/sort/SortOperator.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/bulk/sort/SortOperator.java
index 5a898760a282d..e223c3bf60b6f 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/bulk/sort/SortOperator.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/bulk/sort/SortOperator.java
@@ -54,7 +54,7 @@ public class SortOperator extends TableStreamOperator
private GeneratedNormalizedKeyComputer gComputer;
private GeneratedRecordComparator gComparator;
- private Configuration conf;
+ private final Configuration conf;
private transient BinaryExternalSorter sorter;
private transient StreamRecordCollector collector;
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/clustering/update/strategy/FlinkConsistentBucketUpdateStrategy.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/clustering/update/strategy/FlinkConsistentBucketUpdateStrategy.java
index 73c0d655fb724..e900af3c2500f 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/clustering/update/strategy/FlinkConsistentBucketUpdateStrategy.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/clustering/update/strategy/FlinkConsistentBucketUpdateStrategy.java
@@ -57,8 +57,8 @@ public class FlinkConsistentBucketUpdateStrategy
private static final Logger LOG = LoggerFactory.getLogger(FlinkConsistentBucketUpdateStrategy.class);
private boolean initialized = false;
- private List indexKeyFields;
- private Map> partitionToIdentifier;
+ private final List indexKeyFields;
+ private final Map> partitionToIdentifier;
private String lastRefreshInstant = HoodieTimeline.INIT_INSTANT_TS;
public FlinkConsistentBucketUpdateStrategy(HoodieFlinkWriteClient writeClient, List indexKeyFields) {
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/transform/ChainedTransformer.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/transform/ChainedTransformer.java
index 2fe2867b75463..bb59f9961ab90 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/transform/ChainedTransformer.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/transform/ChainedTransformer.java
@@ -29,7 +29,7 @@
*/
public class ChainedTransformer implements Transformer {
- private List transformers;
+ private final List transformers;
public ChainedTransformer(List transformers) {
this.transformers = transformers;
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/ExpressionEvaluators.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/ExpressionEvaluators.java
index 0feda05a4c773..9c6ec425ed519 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/ExpressionEvaluators.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/ExpressionEvaluators.java
@@ -571,9 +571,9 @@ private static BigDecimal getBigDecimal(@NotNull Object value) {
} else if (value instanceof Double) {
// new BigDecimal() is used instead of BigDecimal.valueOf() in order to
// obtain the exact decimal representation of the double's binary floating-point value
- return new BigDecimal((Double) value);
+ return BigDecimal.valueOf((Double) value);
} else if (value instanceof Float) {
- return new BigDecimal(((Float) value).doubleValue());
+ return BigDecimal.valueOf(((Float) value).doubleValue());
} else if (value instanceof Long) {
return new BigDecimal((Long) value);
} else if (value instanceof Integer) {
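The retained comment above hints that this hunk is more than cosmetic: new BigDecimal(double) captures the
double's exact binary value, while BigDecimal.valueOf(double) goes through Double.toString. A minimal,
self-contained sketch of the difference (class name and values are illustrative, not from this patch):

    import java.math.BigDecimal;

    public class BigDecimalDemo {
        public static void main(String[] args) {
            // Exact binary representation of the double literal 0.1
            System.out.println(new BigDecimal(0.1));
            // 0.1000000000000000055511151231257827021181583404541015625

            // Canonical string form of the double, i.e. Double.toString(0.1)
            System.out.println(BigDecimal.valueOf(0.1));
            // 0.1
        }
    }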
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java
index 90f81289bd2b0..5c9cde00ba90e 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/HoodieTableSource.java
@@ -330,14 +330,10 @@ private String getSourceOperatorName(String operatorName) {
List fields = Arrays.stream(this.requiredPos)
.mapToObj(i -> schemaFieldNames[i])
.collect(Collectors.toList());
- StringBuilder sb = new StringBuilder();
- sb.append(operatorName)
- .append("(")
- .append("table=").append(Collections.singletonList(conf.getString(FlinkOptions.TABLE_NAME)))
- .append(", ")
- .append("fields=").append(fields)
- .append(")");
- return sb.toString();
+ return operatorName + "("
+ + "table=" + Collections.singletonList(conf.get(FlinkOptions.TABLE_NAME))
+ + ", " + "fields=" + fields
+ + ")";
}
@Nullable
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/format/FormatUtils.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/format/FormatUtils.java
index 0628673b8e58e..80c33b76cf3c2 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/format/FormatUtils.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/format/FormatUtils.java
@@ -310,8 +310,7 @@ public static Option getRawValueWithAltKeys(org.apache.flink.configurati
public static boolean getBooleanWithAltKeys(org.apache.flink.configuration.Configuration conf,
ConfigProperty> configProperty) {
Option rawValue = getRawValueWithAltKeys(conf, configProperty);
- boolean defaultValue = configProperty.hasDefaultValue()
- ? Boolean.parseBoolean(configProperty.defaultValue().toString()) : false;
+ boolean defaultValue = configProperty.hasDefaultValue() && Boolean.parseBoolean(configProperty.defaultValue().toString());
return rawValue.map(Boolean::parseBoolean).orElse(defaultValue);
}
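The defaultValue rewrite above leans on the identity that cond ? expr : false is equivalent to cond && expr for
boolean expressions. A throwaway sketch with hand-rolled stand-ins (none of these names come from the patch):

    public class BooleanDefaultDemo {
        static boolean viaTernary(boolean hasDefault, String defaultValue) {
            return hasDefault ? Boolean.parseBoolean(defaultValue) : false;
        }

        static boolean viaShortCircuit(boolean hasDefault, String defaultValue) {
            return hasDefault && Boolean.parseBoolean(defaultValue);
        }

        public static void main(String[] args) {
            for (boolean hasDefault : new boolean[] {false, true}) {
                for (String v : new String[] {"false", "true"}) {
                    // Both forms agree on every combination, so the rewrite is behavior-preserving.
                    System.out.println(viaTernary(hasDefault, v) == viaShortCircuit(hasDefault, v));
                }
            }
        }
    }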
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/lookup/HoodieLookupFunction.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/lookup/HoodieLookupFunction.java
index a43bf1189fb33..a7f6ec2a2bd9b 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/lookup/HoodieLookupFunction.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/lookup/HoodieLookupFunction.java
@@ -138,7 +138,7 @@ private void checkCacheReload() {
return;
}
// Determine whether to reload data by comparing instant
- if (currentCommit != null && latestCommitInstant.get().equals(currentCommit)) {
+ if (latestCommitInstant.get().equals(currentCommit)) {
LOG.info("Ignore loading data because the commit instant " + currentCommit + " has not changed.");
return;
}
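Dropping the currentCommit != null guard above is behavior-preserving because equals on a non-null receiver
returns false for a null argument, so the guarded and unguarded forms agree whenever latestCommitInstant.get()
is non-null at this point. A tiny illustration with String stand-ins (values are made up):

    public class NullSafeEqualsDemo {
        public static void main(String[] args) {
            String latestCommit = "20241219133038";   // stand-in for latestCommitInstant.get()
            String currentCommit = null;              // first invocation: nothing cached yet

            boolean guarded = currentCommit != null && latestCommit.equals(currentCommit);
            boolean unguarded = latestCommit.equals(currentCommit);

            // equals(null) is specified to return false, so both print false
            System.out.println(guarded + " " + unguarded);
        }
    }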
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/FlinkWriteClients.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/FlinkWriteClients.java
index e9d0310d4756d..1984a7c7baea5 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/FlinkWriteClients.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/FlinkWriteClients.java
@@ -208,7 +208,7 @@ public static HoodieWriteConfig getHoodieClientConfig(
).build())
.forTable(conf.getString(FlinkOptions.TABLE_NAME))
.withStorageConfig(HoodieStorageConfig.newBuilder()
- .logFileDataBlockMaxSize(conf.getInteger(FlinkOptions.WRITE_LOG_BLOCK_SIZE) * 1024 * 1024)
+ .logFileDataBlockMaxSize((long) conf.getInteger(FlinkOptions.WRITE_LOG_BLOCK_SIZE) * 1024 * 1024)
.logFileMaxSize(conf.getLong(FlinkOptions.WRITE_LOG_MAX_SIZE) * 1024 * 1024)
.parquetBlockSize(conf.getInteger(FlinkOptions.WRITE_PARQUET_BLOCK_SIZE) * 1024 * 1024)
.parquetPageSize(conf.getInteger(FlinkOptions.WRITE_PARQUET_PAGE_SIZE) * 1024 * 1024)
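The (long) cast on the log block size matters because conf.getInteger(...) * 1024 * 1024 is otherwise evaluated
in 32-bit int arithmetic and can overflow before being widened to the long parameter. A standalone sketch (the
2048 MB value is hypothetical):

    public class LogBlockSizeOverflowDemo {
        public static void main(String[] args) {
            int writeLogBlockSizeMb = 2048;  // hypothetical value of FlinkOptions.WRITE_LOG_BLOCK_SIZE

            long overflowed = writeLogBlockSizeMb * 1024 * 1024;          // int math first, then widened
            long widenedFirst = (long) writeLogBlockSizeMb * 1024 * 1024; // widened before multiplying

            System.out.println(overflowed);    // -2147483648
            System.out.println(widenedFirst);  //  2147483648
        }
    }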
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/RowDataToAvroConverters.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/RowDataToAvroConverters.java
index 57463d913ccfa..f54abd4a16bbf 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/RowDataToAvroConverters.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/RowDataToAvroConverters.java
@@ -50,7 +50,7 @@
@Internal
public class RowDataToAvroConverters {
- private static Conversions.DecimalConversion decimalConversion = new Conversions.DecimalConversion();
+ private static final Conversions.DecimalConversion DECIMAL_CONVERSION = new Conversions.DecimalConversion();
// --------------------------------------------------------------------------------
// Runtime Converters
@@ -219,7 +219,7 @@ public Object convert(Schema schema, Object object) {
@Override
public Object convert(Schema schema, Object object) {
BigDecimal javaDecimal = ((DecimalData) object).toBigDecimal();
- return decimalConversion.toFixed(javaDecimal, schema, schema.getLogicalType());
+ return DECIMAL_CONVERSION.toFixed(javaDecimal, schema, schema.getLogicalType());
}
};
break;
diff --git a/hudi-flink-datasource/hudi-flink1.18.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java b/hudi-flink-datasource/hudi-flink1.18.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java
index 95d8fd720d300..f828ae9dffa78 100644
--- a/hudi-flink-datasource/hudi-flink1.18.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java
+++ b/hudi-flink-datasource/hudi-flink1.18.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java
@@ -29,8 +29,8 @@
public class HeapMapColumnVector extends AbstractHeapVector
implements WritableColumnVector, MapColumnVector {
- private WritableColumnVector keys;
- private WritableColumnVector values;
+ private final WritableColumnVector keys;
+ private final WritableColumnVector values;
public HeapMapColumnVector(int len, WritableColumnVector keys, WritableColumnVector values) {
super(len);
diff --git a/hudi-flink-datasource/hudi-flink1.19.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java b/hudi-flink-datasource/hudi-flink1.19.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java
index c01b8392893a7..a684f64adfb6b 100644
--- a/hudi-flink-datasource/hudi-flink1.19.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java
+++ b/hudi-flink-datasource/hudi-flink1.19.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java
@@ -29,8 +29,8 @@
public class HeapMapColumnVector extends AbstractHeapVector
implements WritableColumnVector, MapColumnVector {
- private WritableColumnVector keys;
- private WritableColumnVector values;
+ private final WritableColumnVector keys;
+ private final WritableColumnVector values;
public HeapMapColumnVector(int len, WritableColumnVector keys, WritableColumnVector values) {
super(len);
diff --git a/hudi-flink-datasource/hudi-flink1.20.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java b/hudi-flink-datasource/hudi-flink1.20.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java
index 95d8fd720d300..f828ae9dffa78 100644
--- a/hudi-flink-datasource/hudi-flink1.20.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java
+++ b/hudi-flink-datasource/hudi-flink1.20.x/src/main/java/org/apache/hudi/table/format/cow/vector/HeapMapColumnVector.java
@@ -29,8 +29,8 @@
public class HeapMapColumnVector extends AbstractHeapVector
implements WritableColumnVector, MapColumnVector {
- private WritableColumnVector keys;
- private WritableColumnVector values;
+ private final WritableColumnVector keys;
+ private final WritableColumnVector values;
public HeapMapColumnVector(int len, WritableColumnVector keys, WritableColumnVector values) {
super(len);
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
index a1bae5deb7bf9..27cb63d58222a 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/config/DFSPropertiesConfiguration.java
@@ -73,7 +73,7 @@ public class DFSPropertiesConfiguration extends PropertiesConfig {
@Nullable
private final Configuration hadoopConfig;
- private StoragePath mainFilePath;
+ private final StoragePath mainFilePath;
// props read from user defined configuration file or input stream
private final HoodieConfig hoodieConfig;
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/AvroOrcUtils.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/AvroOrcUtils.java
index 295e5163ed526..d05a07a74a455 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/AvroOrcUtils.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/AvroOrcUtils.java
@@ -147,13 +147,13 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch
final Utf8 utf8 = (Utf8) value;
bytes = utf8.getBytes();
} else if (value instanceof GenericData.EnumSymbol) {
- bytes = getUTF8Bytes(((GenericData.EnumSymbol) value).toString());
+ bytes = getUTF8Bytes(value.toString());
} else {
throw new IllegalStateException(String.format(
"Unrecognized type for Avro %s field value, which has type %s, value %s",
type.getCategory().getName(),
value.getClass().getName(),
- value.toString()
+ value
));
}
@@ -177,7 +177,7 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch
throw new IllegalStateException(String.format(
"Unrecognized type for Avro DATE field value, which has type %s, value %s",
value.getClass().getName(),
- value.toString()
+ value
));
}
dateColVec.vector[vectorPos] = daysSinceEpoch;
@@ -209,7 +209,7 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch
throw new IllegalStateException(String.format(
"Unrecognized type for Avro TIMESTAMP field value, which has type %s, value %s",
value.getClass().getName(),
- value.toString()
+ value
));
}
@@ -231,7 +231,7 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch
throw new IllegalStateException(String.format(
"Unrecognized type for Avro BINARY field value, which has type %s, value %s",
value.getClass().getName(),
- value.toString()
+ value
));
}
binaryColVec.setRef(vectorPos, binaryBytes, 0, binaryBytes.length);
@@ -338,13 +338,13 @@ public static void addToVector(TypeDescription type, ColumnVector colVector, Sch
throw new IllegalStateException(String.format(
"Failed to add value %s to union with type %s",
value == null ? "null" : value.toString(),
- type.toString()
+ type
));
}
break;
default:
- throw new IllegalArgumentException("Invalid TypeDescription " + type.toString() + ".");
+ throw new IllegalArgumentException("Invalid TypeDescription " + type + ".");
}
}
@@ -598,7 +598,7 @@ public static Object readFromVector(TypeDescription type, ColumnVector colVector
ColumnVector fieldVector = unionVector.fields[tag];
return readFromVector(type.getChildren().get(tag), fieldVector, avroSchema.getTypes().get(tag), vectorPos);
default:
- throw new HoodieIOException("Unrecognized TypeDescription " + type.toString());
+ throw new HoodieIOException("Unrecognized TypeDescription " + type);
}
}
@@ -811,7 +811,7 @@ public static Schema createAvroSchemaWithDefaultValue(TypeDescription orcSchema,
if (nullable) {
fields.add(new Schema.Field(field.name(), nullableSchema, null, NULL_VALUE));
} else {
- fields.add(new Schema.Field(field.name(), fieldSchema, null, (Object) null));
+ fields.add(new Schema.Field(field.name(), fieldSchema, null, null));
}
}
Schema schema = Schema.createRecord(recordName, null, null, false);
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java
index 9f1347872e2c6..f4bac6284c9c0 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/HadoopConfigUtils.java
@@ -84,8 +84,7 @@ public static Option getRawValueWithAltKeys(Configuration conf,
public static boolean getBooleanWithAltKeys(Configuration conf,
ConfigProperty> configProperty) {
Option rawValue = getRawValueWithAltKeys(conf, configProperty);
- boolean defaultValue = configProperty.hasDefaultValue()
- ? Boolean.parseBoolean(configProperty.defaultValue().toString()) : false;
+ boolean defaultValue = configProperty.hasDefaultValue() && Boolean.parseBoolean(configProperty.defaultValue().toString());
return rawValue.map(Boolean::parseBoolean).orElse(defaultValue);
}
}
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java
index 74bd60b398e67..fc84ea2585a93 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java
@@ -275,7 +275,7 @@ public List> readColumnStatsFromMetadata(H
.filter(f -> columnList.contains(f.getPath().toDotString()))
.map(columnChunkMetaData -> {
Statistics stats = columnChunkMetaData.getStatistics();
- return (HoodieColumnRangeMetadata) HoodieColumnRangeMetadata.create(
+ return HoodieColumnRangeMetadata.create(
filePath.getName(),
columnChunkMetaData.getPath().toDotString(),
convertToNativeJavaType(
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/BoundedFsDataInputStream.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/BoundedFsDataInputStream.java
index 68a28ab6989c2..fb76a4a7d5aea 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/BoundedFsDataInputStream.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/BoundedFsDataInputStream.java
@@ -31,8 +31,8 @@
* Implementation of {@link FSDataInputStream} with bound check based on file size.
*/
public class BoundedFsDataInputStream extends FSDataInputStream {
- private FileSystem fs;
- private Path file;
+ private final FileSystem fs;
+ private final Path file;
private long fileLen = -1L;
public BoundedFsDataInputStream(FileSystem fs, Path file, InputStream in) {
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieRetryWrapperFileSystem.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieRetryWrapperFileSystem.java
index b31d2a8ca08a9..c9d8fff3fbbd7 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieRetryWrapperFileSystem.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieRetryWrapperFileSystem.java
@@ -36,7 +36,6 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
@@ -46,11 +45,11 @@
*/
public class HoodieRetryWrapperFileSystem extends FileSystem {
- private FileSystem fileSystem;
- private long maxRetryIntervalMs;
- private int maxRetryNumbers;
- private long initialRetryIntervalMs;
- private String retryExceptionsList;
+ private final FileSystem fileSystem;
+ private final long maxRetryIntervalMs;
+ private final int maxRetryNumbers;
+ private final long initialRetryIntervalMs;
+ private final String retryExceptionsList;
public HoodieRetryWrapperFileSystem(FileSystem fs, long maxRetryIntervalMs, int maxRetryNumbers, long initialRetryIntervalMs, String retryExceptions) {
this.fileSystem = fs;
@@ -203,7 +202,7 @@ public boolean delete(Path f) throws IOException {
}
@Override
- public FileStatus[] listStatus(Path f) throws FileNotFoundException, IOException {
+ public FileStatus[] listStatus(Path f) throws IOException {
return new RetryHelper(maxRetryIntervalMs, maxRetryNumbers, initialRetryIntervalMs, retryExceptionsList).tryWith(() -> fileSystem.listStatus(f)).start();
}
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java
index 64b827d2613e1..8a7b5f0d974f5 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieSerializableFileStatus.java
@@ -37,17 +37,17 @@
*/
public class HoodieSerializableFileStatus implements Serializable {
- private Path path;
- private long length;
- private Boolean isDir;
- private short blockReplication;
- private long blockSize;
- private long modificationTime;
- private long accessTime;
- private FsPermission permission;
- private String owner;
- private String group;
- private Path symlink;
+ private final Path path;
+ private final long length;
+ private final Boolean isDir;
+ private final short blockReplication;
+ private final long blockSize;
+ private final long modificationTime;
+ private final long accessTime;
+ private final FsPermission permission;
+ private final String owner;
+ private final String group;
+ private final Path symlink;
HoodieSerializableFileStatus(Path path, long length, boolean isDir, short blockReplication,
long blockSize, long modificationTime, long accessTime,
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java
index 5d00a14fadbba..276b215fd18ee 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/HoodieWrapperFileSystem.java
@@ -93,7 +93,7 @@ public static void setMetricsRegistry(Registry registry, Registry registryMeta)
}
- private ConcurrentMap openStreams = new ConcurrentHashMap<>();
+ private final ConcurrentMap openStreams = new ConcurrentHashMap<>();
private FileSystem fileSystem;
private URI uri;
private ConsistencyGuard consistencyGuard = new NoOpConsistencyGuard();
@@ -1008,7 +1008,7 @@ public long getBytesWritten(Path file) {
}
// When the file is first written, we do not have a track of it
throw new IllegalArgumentException(
- file.toString() + " does not have a open stream. Cannot get the bytes written on the stream");
+ file + " does not have a open stream. Cannot get the bytes written on the stream");
}
public FileSystem getFileSystem() {
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java
index 7831e76c88fc3..572e593f6d896 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/hadoop/fs/inline/InMemoryFileSystem.java
@@ -29,7 +29,6 @@
import org.apache.hadoop.util.Progressable;
import java.io.ByteArrayOutputStream;
-import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@@ -102,7 +101,7 @@ public boolean delete(Path path, boolean b) throws IOException {
}
@Override
- public FileStatus[] listStatus(Path inlinePath) throws FileNotFoundException, IOException {
+ public FileStatus[] listStatus(Path inlinePath) throws IOException {
throw new UnsupportedOperationException("No support for listStatus");
}
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroHFileWriter.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroHFileWriter.java
index 3fc95818bf273..6967f19246daa 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroHFileWriter.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroHFileWriter.java
@@ -64,7 +64,7 @@
*/
public class HoodieAvroHFileWriter
implements HoodieAvroFileWriter {
- private static AtomicLong recordIndex = new AtomicLong(1);
+ private static final AtomicLong RECORD_INDEX_COUNT = new AtomicLong(1);
private final Path file;
private final HoodieHFileConfig hfileConfig;
private final boolean isWrapperFileSystem;
@@ -127,7 +127,7 @@ public HoodieAvroHFileWriter(String instantTime, StoragePath file, HoodieHFileCo
public void writeAvroWithMetadata(HoodieKey key, IndexedRecord avroRecord) throws IOException {
if (populateMetaFields) {
prepRecordWithMetadata(key, avroRecord, instantTime,
- taskContextSupplier.getPartitionIdSupplier().get(), recordIndex.getAndIncrement(), file.getName());
+ taskContextSupplier.getPartitionIdSupplier().get(), RECORD_INDEX_COUNT.getAndIncrement(), file.getName());
writeAvro(key.getRecordKey(), avroRecord);
} else {
writeAvro(key.getRecordKey(), avroRecord);
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroOrcWriter.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroOrcWriter.java
index 0516caad9ee52..e3ffc69c1a84f 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroOrcWriter.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieAvroOrcWriter.java
@@ -68,7 +68,7 @@ public class HoodieAvroOrcWriter implements HoodieAvroFileWriter, Closeable {
private final String instantTime;
private final TaskContextSupplier taskContextSupplier;
- private HoodieOrcConfig orcConfig;
+ private final HoodieOrcConfig orcConfig;
private String minRecordKey;
private String maxRecordKey;
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java
index 8f17fa0fa1e19..ed20f0bca1149 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/io/hadoop/HoodieBaseParquetWriter.java
@@ -100,7 +100,7 @@ protected void handleParquetBloomFilters(ParquetWriter.Builder parquetWriterbuil
hadoopConf.forEach(conf -> {
String key = conf.getKey();
if (key.startsWith(BLOOM_FILTER_ENABLED)) {
- String column = key.substring(BLOOM_FILTER_ENABLED.length() + 1, key.length());
+ String column = key.substring(BLOOM_FILTER_ENABLED.length() + 1);
try {
Method method = parquetWriterbuilder.getClass().getMethod("withBloomFilterEnabled", String.class, boolean.class);
method.invoke(parquetWriterbuilder, column, Boolean.valueOf(conf.getValue()).booleanValue());
@@ -109,7 +109,7 @@ protected void handleParquetBloomFilters(ParquetWriter.Builder parquetWriterbuil
}
}
if (key.startsWith(BLOOM_FILTER_EXPECTED_NDV)) {
- String column = key.substring(BLOOM_FILTER_EXPECTED_NDV.length() + 1, key.length());
+ String column = key.substring(BLOOM_FILTER_EXPECTED_NDV.length() + 1);
try {
Method method = parquetWriterbuilder.getClass().getMethod("withBloomFilterNDV", String.class, long.class);
method.invoke(parquetWriterbuilder, column, Long.valueOf(conf.getValue()).longValue());
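Both substring fixes above are purely cosmetic: s.substring(begin, s.length()) and s.substring(begin) return the
same suffix. A tiny sketch using made-up keys (the separator and property names are assumptions, not taken from
Parquet's actual config keys):

    public class SubstringSuffixDemo {
        public static void main(String[] args) {
            String bloomFilterEnabled = "parquet.bloom.filter.enabled";  // stand-in for BLOOM_FILTER_ENABLED
            String key = bloomFilterEnabled + "#" + "driver";            // hypothetical per-column key

            String twoArg = key.substring(bloomFilterEnabled.length() + 1, key.length());
            String oneArg = key.substring(bloomFilterEnabled.length() + 1);

            System.out.println(twoArg.equals(oneArg));  // true, both are "driver"
        }
    }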
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieHFileRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieHFileRecordReader.java
index 3414f5fdea881..5b32dd435a8be 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieHFileRecordReader.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieHFileRecordReader.java
@@ -50,10 +50,10 @@
public class HoodieHFileRecordReader implements RecordReader {
private long count = 0;
- private ArrayWritable valueObj;
+ private final ArrayWritable valueObj;
private HoodieFileReader reader;
private ClosableIterator> recordIterator;
- private Schema schema;
+ private final Schema schema;
public HoodieHFileRecordReader(Configuration conf, InputSplit split, JobConf job) throws IOException {
FileSplit fileSplit = (FileSplit) split;
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java
index c301130a84018..3b6b3940489dd 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieROTablePathFilter.java
@@ -74,7 +74,7 @@ public class HoodieROTablePathFilter implements Configurable, PathFilter, Serial
* It's quite common to have all files from a given partition path be passed into accept(), so cache the check for hoodie
* metadata for known partition paths and the latest versions of files.
*/
- private Map> hoodiePathCache;
+ private final Map> hoodiePathCache;
/**
* Paths that are known to be non-hoodie tables.
@@ -214,7 +214,7 @@ public boolean accept(Path path) {
if (!hoodiePathCache.containsKey(folder.toString())) {
hoodiePathCache.put(folder.toString(), new HashSet<>());
}
- LOG.info("Based on hoodie metadata from base path: " + baseDir.toString() + ", caching " + latestFiles.size()
+ LOG.info("Based on hoodie metadata from base path: " + baseDir + ", caching " + latestFiles.size()
+ " files under " + folder);
for (HoodieBaseFile lfile : latestFiles) {
hoodiePathCache.get(folder.toString()).add(new Path(lfile.getPath()));
@@ -229,7 +229,7 @@ public boolean accept(Path path) {
} catch (TableNotFoundException e) {
// Non-hoodie path, accept it.
if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("(1) Caching non-hoodie path under %s with basePath %s \n", folder.toString(), baseDir.toString()));
+ LOG.debug(String.format("(1) Caching non-hoodie path under %s with basePath %s \n", folder, baseDir));
}
nonHoodiePathCache.add(folder.toString());
nonHoodiePathCache.add(baseDir.toString());
@@ -242,7 +242,7 @@ public boolean accept(Path path) {
} else {
// files is at < 3 level depth in FS tree, can't be hoodie dataset
if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("(2) Caching non-hoodie path under %s \n", folder.toString()));
+ LOG.debug(String.format("(2) Caching non-hoodie path under %s \n", folder));
}
nonHoodiePathCache.add(folder.toString());
return true;
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/InputPathHandler.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/InputPathHandler.java
index f0c2e6a1fe2f5..88e96d29e1be2 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/InputPathHandler.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/InputPathHandler.java
@@ -60,7 +60,7 @@ public class InputPathHandler {
private final Map> groupedIncrementalPaths;
private final List snapshotPaths;
private final List nonHoodieInputPaths;
- private boolean isIncrementalUseDatabase;
+ private final boolean isIncrementalUseDatabase;
public InputPathHandler(Configuration conf, Path[] inputPaths, List incrementalTables) throws IOException {
this.conf = conf;
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/avro/HoodieAvroParquetReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/avro/HoodieAvroParquetReader.java
index c31041ddc76b0..b5a8e0138826d 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/avro/HoodieAvroParquetReader.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/avro/HoodieAvroParquetReader.java
@@ -48,7 +48,7 @@
public class HoodieAvroParquetReader extends RecordReader {
private final ParquetRecordReader parquetRecordReader;
- private Schema baseSchema;
+ private final Schema baseSchema;
public HoodieAvroParquetReader(InputSplit inputSplit, Configuration conf) throws IOException {
// get base schema
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java
index 99c9ecd210e47..445531b3f2f5b 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineHiveInputFormat.java
@@ -759,11 +759,9 @@ public String[] getLocations() throws IOException {
*/
@Override
public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append(inputSplitShim.toString());
- sb.append("InputFormatClass: " + inputFormatClassName);
- sb.append("\n");
- return sb.toString();
+ return inputSplitShim.toString()
+ + "InputFormatClass: " + inputFormatClassName
+ + "\n";
}
/**
@@ -829,8 +827,7 @@ public boolean equals(Object o) {
if (o instanceof CombinePathInputFormat) {
CombinePathInputFormat mObj = (CombinePathInputFormat) o;
return (opList.equals(mObj.opList)) && (inputFormatClassName.equals(mObj.inputFormatClassName))
- && (deserializerClassName == null ? (mObj.deserializerClassName == null)
- : deserializerClassName.equals(mObj.deserializerClassName));
+ && (Objects.equals(deserializerClassName, mObj.deserializerClassName));
}
return false;
}
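Objects.equals(a, b) encapsulates exactly the null-safe comparison the removed ternary spelled out by hand, so the
rewrite preserves behavior, assuming java.util.Objects is (or gets) imported in HoodieCombineHiveInputFormat. A
compact equivalence check (sample strings are arbitrary):

    import java.util.Objects;

    public class NullSafeCompareDemo {
        static boolean manual(String a, String b) {
            return a == null ? (b == null) : a.equals(b);
        }

        public static void main(String[] args) {
            String[] samples = {null, "LazySimpleSerDe", "ParquetHiveSerDe"};
            for (String a : samples) {
                for (String b : samples) {
                    // Objects.equals performs the same null-safe comparison
                    System.out.println(manual(a, b) == Objects.equals(a, b));  // always true
                }
            }
        }
    }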
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineRealtimeFileSplit.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineRealtimeFileSplit.java
index a30aa178f1236..5636339b4d228 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineRealtimeFileSplit.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/hive/HoodieCombineRealtimeFileSplit.java
@@ -51,11 +51,11 @@ public HoodieCombineRealtimeFileSplit() {
public HoodieCombineRealtimeFileSplit(JobConf jobConf, List realtimeFileSplits) {
super(jobConf, realtimeFileSplits.stream().map(p ->
- ((HoodieRealtimeFileSplit) p).getPath()).collect(Collectors.toList()).toArray(new
+ p.getPath()).collect(Collectors.toList()).toArray(new
Path[realtimeFileSplits.size()]),
- ArrayUtils.toPrimitive(realtimeFileSplits.stream().map(p -> ((HoodieRealtimeFileSplit) p).getStart())
+ ArrayUtils.toPrimitive(realtimeFileSplits.stream().map(p -> p.getStart())
.collect(Collectors.toList()).toArray(new Long[realtimeFileSplits.size()])),
- ArrayUtils.toPrimitive(realtimeFileSplits.stream().map(p -> ((HoodieRealtimeFileSplit) p).getLength())
+ ArrayUtils.toPrimitive(realtimeFileSplits.stream().map(p -> p.getLength())
.collect(Collectors.toList()).toArray(new Long[realtimeFileSplits.size()])),
realtimeFileSplits.stream().map(p -> {
try {
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java
index a0df73c34e218..3b36974a0c953 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/AbstractRealtimeRecordReader.java
@@ -76,7 +76,7 @@ public abstract class AbstractRealtimeRecordReader {
protected boolean supportPayload;
// handle hive type to avro record
protected HiveAvroSerializer serializer;
- private boolean supportTimestamp;
+ private final boolean supportTimestamp;
public AbstractRealtimeRecordReader(RealtimeSplit split, JobConf job) {
this.split = split;
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieCombineRealtimeRecordReader.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieCombineRealtimeRecordReader.java
index a663bbc6ba73e..84d8868f5f3b9 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieCombineRealtimeRecordReader.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/realtime/HoodieCombineRealtimeRecordReader.java
@@ -44,9 +44,9 @@
*/
public class HoodieCombineRealtimeRecordReader implements RecordReader {
- private static final transient Logger LOG = LoggerFactory.getLogger(HoodieCombineRealtimeRecordReader.class);
+ private static final Logger LOG = LoggerFactory.getLogger(HoodieCombineRealtimeRecordReader.class);
// RecordReaders for each split
- private List recordReaders = new LinkedList<>();
+ private final List recordReaders = new LinkedList<>();
// Points to the currently iterating record reader
private RecordReader currentRecordReader;
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieHiveUtils.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieHiveUtils.java
index b4894c35d4189..981ab9ce54b3c 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieHiveUtils.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieHiveUtils.java
@@ -139,7 +139,7 @@ public static List getIncrementalTableNames(JobContext job) {
Map tablesModeMap = job.getConfiguration()
.getValByRegex(HOODIE_CONSUME_MODE_PATTERN_STRING.pattern());
List result = tablesModeMap.entrySet().stream().map(s -> {
- if (s.getValue().trim().toUpperCase().equals(INCREMENTAL_SCAN_MODE)) {
+ if (s.getValue().trim().equalsIgnoreCase(INCREMENTAL_SCAN_MODE)) {
Matcher matcher = HOODIE_CONSUME_MODE_PATTERN_STRING.matcher(s.getKey());
return (!matcher.find() ? null : matcher.group(1));
}
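equalsIgnoreCase compares characters without consulting the default locale, whereas toUpperCase() with no explicit
locale does; under a Turkish default locale a lower-case 'i' upper-cases to a dotted capital İ and the old
comparison would fail. A small sketch, assuming INCREMENTAL_SCAN_MODE holds the string "INCREMENTAL":

    import java.util.Locale;

    public class CaseCompareDemo {
        public static void main(String[] args) {
            String incrementalScanMode = "INCREMENTAL";  // assumed value of INCREMENTAL_SCAN_MODE
            String userValue = " incremental ";

            // Locale-independent, char-by-char comparison
            System.out.println(userValue.trim().equalsIgnoreCase(incrementalScanMode));  // true

            // toUpperCase() uses the default locale; under tr-TR 'i' maps to 'İ' and the check fails
            Locale.setDefault(Locale.forLanguageTag("tr-TR"));
            System.out.println(userValue.trim().toUpperCase().equals(incrementalScanMode));  // false
        }
    }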
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieInputFormatUtils.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieInputFormatUtils.java
index aa24bd36fd92b..be7c363f61964 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieInputFormatUtils.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieInputFormatUtils.java
@@ -27,8 +27,8 @@
import org.apache.hudi.common.model.HoodieFileFormat;
import org.apache.hudi.common.model.HoodiePartitionMetadata;
import org.apache.hudi.common.table.HoodieTableMetaClient;
-import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.table.timeline.TimelineUtils.HollowCommitHandling;
import org.apache.hudi.common.table.view.HoodieTableFileSystemView;
import org.apache.hudi.common.table.view.TableFileSystemView;
@@ -218,12 +218,12 @@ public static FileInputFormat getInputFormat(String path, boolean realtime, Conf
* @return
*/
public static HoodieTimeline filterInstantsTimeline(HoodieTimeline timeline) {
- HoodieTimeline commitsAndCompactionTimeline = (HoodieTimeline)timeline.getWriteTimeline();
+ HoodieTimeline commitsAndCompactionTimeline = timeline.getWriteTimeline();
Option pendingCompactionInstant = commitsAndCompactionTimeline
.filterPendingCompactionTimeline().firstInstant();
if (pendingCompactionInstant.isPresent()) {
- HoodieTimeline instantsTimeline = (HoodieTimeline)(commitsAndCompactionTimeline
- .findInstantsBefore(pendingCompactionInstant.get().requestedTime()));
+ HoodieTimeline instantsTimeline = commitsAndCompactionTimeline
+ .findInstantsBefore(pendingCompactionInstant.get().requestedTime());
int numCommitsFilteredByCompaction = commitsAndCompactionTimeline.getCommitsTimeline().countInstants()
- instantsTimeline.getCommitsTimeline().countInstants();
LOG.info("Earliest pending compaction instant is: " + pendingCompactionInstant.get().requestedTime()
diff --git a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieRealtimeInputFormatUtils.java b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieRealtimeInputFormatUtils.java
index b8308011fd887..fb92c70fd4a4b 100644
--- a/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieRealtimeInputFormatUtils.java
+++ b/hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/utils/HoodieRealtimeInputFormatUtils.java
@@ -119,8 +119,7 @@ public static boolean requiredProjectionFieldsExistInConf(Configuration configur
&& readColNames.contains(HoodieRecord.PARTITION_PATH_METADATA_FIELD);
} else {
return readColNames.contains(hoodieVirtualKeyInfo.get().getRecordKeyField())
- && (hoodieVirtualKeyInfo.get().getPartitionPathField().isPresent() ? readColNames.contains(hoodieVirtualKeyInfo.get().getPartitionPathField().get())
- : true);
+ && (!hoodieVirtualKeyInfo.get().getPartitionPathField().isPresent() || readColNames.contains(hoodieVirtualKeyInfo.get().getPartitionPathField().get()));
}
}
diff --git a/hudi-io/src/main/java/org/apache/hudi/common/util/ComparableVersion.java b/hudi-io/src/main/java/org/apache/hudi/common/util/ComparableVersion.java
index d9c7c6e6f200b..68fe013a1432a 100644
--- a/hudi-io/src/main/java/org/apache/hudi/common/util/ComparableVersion.java
+++ b/hudi-io/src/main/java/org/apache/hudi/common/util/ComparableVersion.java
@@ -154,7 +154,7 @@ private static class StringItem
*/
private static final String RELEASE_VERSION_INDEX = String.valueOf(QUALIFIER_LIST.indexOf(""));
- private String value;
+ private final String value;
public StringItem(String value, boolean followedByDigit) {
if (followedByDigit && value.length() == 1) {
diff --git a/hudi-io/src/main/java/org/apache/hudi/common/util/StringUtils.java b/hudi-io/src/main/java/org/apache/hudi/common/util/StringUtils.java
index 2b9f1af11b433..58b4adefd33c4 100644
--- a/hudi-io/src/main/java/org/apache/hudi/common/util/StringUtils.java
+++ b/hudi-io/src/main/java/org/apache/hudi/common/util/StringUtils.java
@@ -299,7 +299,7 @@ public static String replace(String text, String searchString, String replacemen
increase *= (max < 0 ? 16 : (max > 64 ? 64 : max));
StringBuilder buf = new StringBuilder(text.length() + increase);
while (end != INDEX_NOT_FOUND) {
- buf.append(text.substring(start, end)).append(replacement);
+ buf.append(text, start, end).append(replacement);
start = end + replLength;
if (--max == 0) {
break;
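buf.append(text, start, end) uses the StringBuilder.append(CharSequence, int, int) overload, which copies the
characters directly instead of first materializing text.substring(start, end) as a temporary String; the resulting
content is identical. A minimal sketch (inputs are arbitrary):

    public class AppendRangeDemo {
        public static void main(String[] args) {
            String text = "foo-bar-baz";
            int start = 4;
            int end = 7;

            StringBuilder viaSubstring = new StringBuilder().append(text.substring(start, end));
            StringBuilder viaRange = new StringBuilder().append(text, start, end);

            System.out.println(viaSubstring);  // bar
            System.out.println(viaRange);      // bar
        }
    }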
diff --git a/hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java b/hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java
index 72a0ecec78bc6..284988cc4cdd2 100644
--- a/hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java
+++ b/hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java
@@ -108,7 +108,7 @@ public int getId() {
INDEX_V1("IDXBLK)+", BlockCategory.INDEX);
public enum BlockCategory {
- DATA, META, INDEX, BLOOM, ALL_CATEGORIES, UNKNOWN;
+ DATA, META, INDEX, BLOOM, ALL_CATEGORIES, UNKNOWN
}
private final byte[] magic;
diff --git a/hudi-io/src/main/java/org/apache/hudi/storage/StorageSchemes.java b/hudi-io/src/main/java/org/apache/hudi/storage/StorageSchemes.java
index 00e8594a0c83c..dc61280e1686c 100644
--- a/hudi-io/src/main/java/org/apache/hudi/storage/StorageSchemes.java
+++ b/hudi-io/src/main/java/org/apache/hudi/storage/StorageSchemes.java
@@ -84,11 +84,11 @@ public enum StorageSchemes {
// Hopsworks File System
HOPSFS("hopsfs", false, true);
- private String scheme;
+ private final String scheme;
// null when it is uncertain whether writes are transactional; please update this for each FS
- private Boolean isWriteTransactional;
+ private final Boolean isWriteTransactional;
// null when it is uncertain whether the dfs supports atomic create & delete; please update this for each FS
- private Boolean supportAtomicCreation;
+ private final Boolean supportAtomicCreation;
StorageSchemes(String scheme, Boolean isWriteTransactional, Boolean supportAtomicCreation) {
this.scheme = scheme;
diff --git a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/kafka/KafkaConnectControlAgent.java b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/kafka/KafkaConnectControlAgent.java
index a40a6f73a2c15..4c35822772429 100644
--- a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/kafka/KafkaConnectControlAgent.java
+++ b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/kafka/KafkaConnectControlAgent.java
@@ -125,7 +125,7 @@ private void start() {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
// Todo fetch the worker id or name instead of a uuid.
- props.put(ConsumerConfig.GROUP_ID_CONFIG, "hudi-control-group" + UUID.randomUUID().toString());
+ props.put(ConsumerConfig.GROUP_ID_CONFIG, "hudi-control-group" + UUID.randomUUID());
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
diff --git a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java
index be7d866df92d9..8f904f8d87b6c 100644
--- a/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java
+++ b/hudi-kafka-connect/src/main/java/org/apache/hudi/connect/utils/KafkaConnectUtils.java
@@ -55,7 +55,7 @@
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -123,7 +123,7 @@ public static int getLatestNumPartitions(String bootstrapServers, String topicNa
props.put("bootstrap.servers", bootstrapServers);
try {
AdminClient client = AdminClient.create(props);
- DescribeTopicsResult result = client.describeTopics(Arrays.asList(topicName));
+ DescribeTopicsResult result = client.describeTopics(Collections.singletonList(topicName));
Map> values = result.values();
KafkaFuture topicDescription = values.get(topicName);
int numPartitions = topicDescription.get().partitions().size();
diff --git a/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/common/table/view/HoodieMetaserverFileSystemView.java b/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/common/table/view/HoodieMetaserverFileSystemView.java
index 2ec9dbb9a912d..2cef699cb3193 100644
--- a/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/common/table/view/HoodieMetaserverFileSystemView.java
+++ b/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/common/table/view/HoodieMetaserverFileSystemView.java
@@ -29,10 +29,10 @@
* is specifically for hoodie table whose metadata is stored in the hoodie metaserver.
*/
public class HoodieMetaserverFileSystemView extends HoodieTableFileSystemView {
- private String databaseName;
- private String tableName;
+ private final String databaseName;
+ private final String tableName;
- private HoodieMetaserverClient metaserverClient;
+ private final HoodieMetaserverClient metaserverClient;
public HoodieMetaserverFileSystemView(HoodieTableMetaClient metaClient,
HoodieTimeline visibleActiveTimeline, HoodieMetaserverConfig config) {
diff --git a/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/metaserver/client/HoodieMetaserverClientImp.java b/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/metaserver/client/HoodieMetaserverClientImp.java
index 2c8be6c30aab1..409c3aeece409 100644
--- a/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/metaserver/client/HoodieMetaserverClientImp.java
+++ b/hudi-platform-service/hudi-metaserver/hudi-metaserver-client/src/main/java/org/apache/hudi/metaserver/client/HoodieMetaserverClientImp.java
@@ -55,9 +55,9 @@ public class HoodieMetaserverClientImp implements HoodieMetaserverClient {
private final long retryDelayMs;
private boolean isConnected;
private boolean isLocal;
- private ThriftHoodieMetaserver.Iface client;
+ private final ThriftHoodieMetaserver.Iface client;
private TTransport transport;
- private DefaultInstantGenerator instantGenerator = new DefaultInstantGenerator();
+ private final DefaultInstantGenerator instantGenerator = new DefaultInstantGenerator();
public HoodieMetaserverClientImp(HoodieMetaserverConfig config) {
this.config = config;
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/DataSourceUtils.java b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/DataSourceUtils.java
index c8666711189a7..4cf4a4c2f162e 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/DataSourceUtils.java
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/DataSourceUtils.java
@@ -229,7 +229,7 @@ public static HoodieWriteResult doWriteOperation(SparkRDDWriteClient client, Jav
case INSERT_OVERWRITE_TABLE:
return client.insertOverwriteTable(hoodieRecords, instantTime);
default:
- throw new HoodieException("Not a valid operation type for doWriteOperation: " + operation.toString());
+ throw new HoodieException("Not a valid operation type for doWriteOperation: " + operation);
}
}
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/BaseWriterCommitMessage.java b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/BaseWriterCommitMessage.java
index 3ba0474a34cb8..d8e0e5372e709 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/BaseWriterCommitMessage.java
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/BaseWriterCommitMessage.java
@@ -29,7 +29,7 @@
*/
public class BaseWriterCommitMessage implements Serializable {
- private List writeStatuses;
+ private final List writeStatuses;
public BaseWriterCommitMessage(List writeStatuses) {
this.writeStatuses = writeStatuses;
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/DataSourceInternalWriterHelper.java b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/DataSourceInternalWriterHelper.java
index d9db9cd51d192..e79203b918a36 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/DataSourceInternalWriterHelper.java
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/internal/DataSourceInternalWriterHelper.java
@@ -55,7 +55,7 @@ public class DataSourceInternalWriterHelper {
private final SparkRDDWriteClient writeClient;
private final HoodieTable hoodieTable;
private final WriteOperationType operationType;
- private Map extraMetadata;
+ private final Map extraMetadata;
public DataSourceInternalWriterHelper(String instantTime, HoodieWriteConfig writeConfig, StructType structType,
SparkSession sparkSession, StorageConfiguration> storageConf, Map extraMetadata) {
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/sql/InsertMode.java b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/sql/InsertMode.java
index c68bd60ba6344..7b7c08106bcac 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/sql/InsertMode.java
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/java/org/apache/hudi/sql/InsertMode.java
@@ -40,7 +40,7 @@ public enum InsertMode {
*/
NON_STRICT("non-strict");
- private String value;
+ private final String value;
InsertMode(String value) {
this.value = value;
diff --git a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/QuickstartUtils.java b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/QuickstartUtils.java
index f7eb35090e0c6..404e3c7361077 100644
--- a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/QuickstartUtils.java
+++ b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/QuickstartUtils.java
@@ -65,7 +65,7 @@ public static class DataGenerator {
+ "{\"name\":\"fare\",\"type\": \"double\"}]}";
static Schema avroSchema = new Schema.Parser().parse(TRIP_EXAMPLE_SCHEMA);
- private static Random rand = new Random(46474747);
+ private static final Random RAND = new Random(46474747);
private final Map existingKeys;
private final String[] partitionPaths;
@@ -90,7 +90,7 @@ private static String generateRandomString() {
int stringLength = 3;
StringBuilder buffer = new StringBuilder(stringLength);
for (int i = 0; i < stringLength; i++) {
- int randomLimitedInt = leftLimit + (int) (rand.nextFloat() * (rightLimit - leftLimit + 1));
+ int randomLimitedInt = leftLimit + (int) (RAND.nextFloat() * (rightLimit - leftLimit + 1));
buffer.append((char) randomLimitedInt);
}
return buffer.toString();
@@ -107,11 +107,11 @@ public static GenericRecord generateGenericRecord(String rowKey, String riderNam
rec.put("ts", timestamp);
rec.put("rider", riderName);
rec.put("driver", driverName);
- rec.put("begin_lat", rand.nextDouble());
- rec.put("begin_lon", rand.nextDouble());
- rec.put("end_lat", rand.nextDouble());
- rec.put("end_lon", rand.nextDouble());
- rec.put("fare", rand.nextDouble() * 100);
+ rec.put("begin_lat", RAND.nextDouble());
+ rec.put("begin_lon", RAND.nextDouble());
+ rec.put("end_lat", RAND.nextDouble());
+ rec.put("end_lon", RAND.nextDouble());
+ rec.put("fare", RAND.nextDouble() * 100);
return rec;
}
@@ -146,7 +146,7 @@ public Stream generateInsertsStream(String randomString, Integer n
int currSize = getNumExistingKeys();
return IntStream.range(0, n).boxed().map(i -> {
- String partitionPath = partitionPaths[rand.nextInt(partitionPaths.length)];
+ String partitionPath = partitionPaths[RAND.nextInt(partitionPaths.length)];
HoodieKey key = new HoodieKey(UUID.randomUUID().toString(), partitionPath);
existingKeys.put(currSize + i, key);
numExistingKeys++;
@@ -184,7 +184,7 @@ public List generateUpdates(Integer n) {
String randomString = generateRandomString();
return IntStream.range(0, n).boxed().map(x -> {
try {
- return generateUpdateRecord(existingKeys.get(rand.nextInt(numExistingKeys)), randomString);
+ return generateUpdateRecord(existingKeys.get(RAND.nextInt(numExistingKeys)), randomString);
} catch (IOException e) {
throw new HoodieIOException(e.getMessage(), e);
}
@@ -249,13 +249,12 @@ private static Option convertToString(HoodieRecord record) {
}
private static Option convertToString(String uuid, String partitionPath, Long ts) {
- StringBuffer stringBuffer = new StringBuffer();
- stringBuffer.append("{");
- stringBuffer.append("\"ts\": \"" + (ts == null ? "0.0" : ts) + "\",");
- stringBuffer.append("\"uuid\": \"" + uuid + "\",");
- stringBuffer.append("\"partitionpath\": \"" + partitionPath + "\"");
- stringBuffer.append("}");
- return Option.of(stringBuffer.toString());
+ String json = "{"
+ + "\"ts\": \"" + (ts == null ? "0.0" : ts) + "\","
+ + "\"uuid\": \"" + uuid + "\","
+ + "\"partitionpath\": \"" + partitionPath + "\""
+ + "}";
+ return Option.of(json);
}
public static List convertToStringList(List records) {
diff --git a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java
index 0c9c12298cdc1..52b452d8a4be6 100644
--- a/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java
+++ b/hudi-spark-datasource/hudi-spark/src/main/java/org/apache/hudi/cli/HDFSParquetImporterUtils.java
@@ -69,8 +69,6 @@
import java.util.ArrayList;
import java.util.List;
-import scala.Tuple2;
-
import static org.apache.hudi.common.util.StringUtils.fromUTF8Bytes;
/**
@@ -194,7 +192,7 @@ public JavaRDD> buildHoodieRecordsForImport(Ja
job.getConfiguration())
// To reduce large number of tasks.
.coalesce(16 * this.parallelism).map(entry -> {
- GenericRecord genericRecord = ((Tuple2) entry)._2();
+ GenericRecord genericRecord = ((scala.Tuple2) entry)._2();
Object partitionField = genericRecord.get(this.partitionKey);
if (partitionField == null) {
throw new HoodieIOException("partition key is missing. :" + this.partitionKey);
diff --git a/hudi-spark-datasource/hudi-spark3-common/src/main/java/org/apache/spark/sql/execution/datasources/parquet/Spark3HoodieVectorizedParquetRecordReader.java b/hudi-spark-datasource/hudi-spark3-common/src/main/java/org/apache/spark/sql/execution/datasources/parquet/Spark3HoodieVectorizedParquetRecordReader.java
index f1d10d2885586..554f137c0c1b2 100644
--- a/hudi-spark-datasource/hudi-spark3-common/src/main/java/org/apache/spark/sql/execution/datasources/parquet/Spark3HoodieVectorizedParquetRecordReader.java
+++ b/hudi-spark-datasource/hudi-spark3-common/src/main/java/org/apache/spark/sql/execution/datasources/parquet/Spark3HoodieVectorizedParquetRecordReader.java
@@ -17,10 +17,11 @@
package org.apache.spark.sql.execution.datasources.parquet;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hudi.client.utils.SparkInternalSchemaConverter;
import org.apache.hudi.common.util.collection.Pair;
+
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.spark.memory.MemoryMode;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector;
@@ -39,7 +40,7 @@
public class Spark3HoodieVectorizedParquetRecordReader extends VectorizedParquetRecordReader {
// save the col type change info.
- private Map> typeChangeInfos;
+ private final Map> typeChangeInfos;
private ColumnarBatch columnarBatch;
@@ -48,7 +49,7 @@ public class Spark3HoodieVectorizedParquetRecordReader extends VectorizedParquet
private ColumnVector[] columnVectors;
// The capacity of vectorized batch.
- private int capacity;
+ private final int capacity;
// If true, this class returns batches instead of rows.
private boolean returnColumnarBatch;
diff --git a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/ColumnNameXLator.java b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/ColumnNameXLator.java
index 7d4eda29dc44c..e2f60ec97b0cc 100644
--- a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/ColumnNameXLator.java
+++ b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/ColumnNameXLator.java
@@ -24,11 +24,11 @@
public class ColumnNameXLator {
- private static Map<String, String> xformMap = new HashMap<>();
+ private static final Map<String, String> X_FORM_MAP = new HashMap<>();
public static String translateNestedColumn(String colName) {
Map.Entry<String, String> entry;
- for (Iterator<Map.Entry<String, String>> ic = xformMap.entrySet().iterator(); ic.hasNext(); colName =
+ for (Iterator<Map.Entry<String, String>> ic = X_FORM_MAP.entrySet().iterator(); ic.hasNext(); colName =
colName.replaceAll(entry.getKey(), entry.getValue())) {
entry = ic.next();
}
@@ -45,6 +45,6 @@ public static String translate(String colName, boolean nestedColumn) {
}
static {
- xformMap.put("\\$", "_dollar_");
+ X_FORM_MAP.put("\\$", "_dollar_");
}
}
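Note: the only mapping registered in the static block is "\\$" -> "_dollar_", so the renamed X_FORM_MAP drives a single regex replacement. A minimal usage sketch (the column name is made up):

    // Illustrative only; every '$' in the column name is rewritten.
    String translated = ColumnNameXLator.translateNestedColumn("a$b.c$d");
    // translated -> "a_dollar_b.c_dollar_d"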
diff --git a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/HiveSchemaUtil.java b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/HiveSchemaUtil.java
index baf905094d409..76297759892e5 100644
--- a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/HiveSchemaUtil.java
+++ b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/util/HiveSchemaUtil.java
@@ -446,8 +446,8 @@ public static String generateCreateDDL(String tableName, MessageType storageSche
List<String> partitionFields = new ArrayList<>();
for (String partitionKey : config.getSplitStrings(META_SYNC_PARTITION_FIELDS)) {
String partitionKeyWithTicks = tickSurround(partitionKey);
- partitionFields.add(new StringBuilder().append(partitionKeyWithTicks).append(" ")
- .append(getPartitionKeyType(hiveSchema, partitionKeyWithTicks)).toString());
+ partitionFields.add(partitionKeyWithTicks + " "
+ + getPartitionKeyType(hiveSchema, partitionKeyWithTicks));
}
String partitionsStr = String.join(",", partitionFields);
diff --git a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/hive/SchemaDifference.java b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/hive/SchemaDifference.java
index f48208a439fd6..7a77cab2df1dd 100644
--- a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/hive/SchemaDifference.java
+++ b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/hive/SchemaDifference.java
@@ -83,9 +83,9 @@ public static class Builder {
private final MessageType storageSchema;
private final Map tableSchema;
- private List deleteColumns;
- private Map updateColumnTypes;
- private Map addColumnTypes;
+ private final List deleteColumns;
+ private final Map updateColumnTypes;
+ private final Map addColumnTypes;
public Builder(MessageType storageSchema, Map tableSchema) {
this.storageSchema = storageSchema;
diff --git a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncConfig.java b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncConfig.java
index 475a43a0bab04..bac20f851d8c7 100644
--- a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncConfig.java
+++ b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncConfig.java
@@ -201,7 +201,7 @@ public class HoodieSyncConfig extends HoodieConfig {
+ "obtained from Hudi's internal metadata table. Note, " + HoodieMetadataConfig.ENABLE + " must be set to true.");
private Configuration hadoopConf;
- private HoodieMetricsConfig metricsConfig;
+ private final HoodieMetricsConfig metricsConfig;
public HoodieSyncConfig(Properties props) {
this(props, HadoopConfigUtils.createHadoopConf(props));
diff --git a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncException.java b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncException.java
index d7238fbe8bf98..109296d6b33a1 100644
--- a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncException.java
+++ b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncException.java
@@ -37,6 +37,6 @@ public HoodieSyncException(Throwable t) {
}
protected static String format(String message, Object... args) {
- return String.format(String.valueOf(message), (Object[]) args);
+ return String.format(String.valueOf(message), args);
}
}
diff --git a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/metrics/HoodieMetaSyncMetrics.java b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/metrics/HoodieMetaSyncMetrics.java
index cbe729ae0301d..d810c396c7436 100644
--- a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/metrics/HoodieMetaSyncMetrics.java
+++ b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/metrics/HoodieMetaSyncMetrics.java
@@ -43,7 +43,7 @@ public class HoodieMetaSyncMetrics {
private static final String RECREATE_TABLE_DURATION_MS_METRIC = "recreate_table_duration_ms";
// Metrics are shut down by the shutdown hook added in the Metrics class
private Metrics metrics;
- private HoodieMetricsConfig metricsConfig;
+ private final HoodieMetricsConfig metricsConfig;
private transient HoodieStorage storage;
private final String syncToolName;
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java
index 481fa02d1e6c9..77db3af5c0c8c 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java
@@ -55,10 +55,10 @@ public class TimelineService {
private int serverPort;
private final Config timelineServerConf;
private final StorageConfiguration> storageConf;
- private transient HoodieEngineContext context;
- private transient HoodieStorage storage;
+ private final transient HoodieEngineContext context;
+ private final transient HoodieStorage storage;
private transient Javalin app = null;
- private transient FileSystemViewManager fsViewsManager;
+ private final transient FileSystemViewManager fsViewsManager;
private transient RequestHandler requestHandler;
public int getServerPort() {
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/MarkerHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/MarkerHandler.java
index ccddbed18eb24..8d99e85c1ef22 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/MarkerHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/MarkerHandler.java
@@ -96,7 +96,7 @@ public class MarkerHandler extends Handler {
private final MarkerCreationDispatchingRunnable markerCreationDispatchingRunnable;
private final Object firstCreationRequestSeenLock = new Object();
private final Object earlyConflictDetectionLock = new Object();
- private transient HoodieEngineContext hoodieEngineContext;
+ private final transient HoodieEngineContext hoodieEngineContext;
private ScheduledFuture> dispatchingThreadFuture;
private boolean firstCreationRequestSeen;
private String currentMarkerDir = null;
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/AsyncTimelineServerBasedDetectionStrategy.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/AsyncTimelineServerBasedDetectionStrategy.java
index d73d787a5dc0f..bc710af3a2d22 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/AsyncTimelineServerBasedDetectionStrategy.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/AsyncTimelineServerBasedDetectionStrategy.java
@@ -43,7 +43,7 @@ public class AsyncTimelineServerBasedDetectionStrategy extends TimelineServerBas
private static final Logger LOG = LoggerFactory.getLogger(AsyncTimelineServerBasedDetectionStrategy.class);
- private AtomicBoolean hasConflict = new AtomicBoolean(false);
+ private final AtomicBoolean hasConflict = new AtomicBoolean(false);
private ScheduledExecutorService asyncDetectorExecutor;
public AsyncTimelineServerBasedDetectionStrategy(String basePath, String markerDir, String markerName, Boolean checkCommitConflict) {
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerBasedEarlyConflictDetectionRunnable.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerBasedEarlyConflictDetectionRunnable.java
index bce28e8ae9cd3..102203025f35e 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerBasedEarlyConflictDetectionRunnable.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerBasedEarlyConflictDetectionRunnable.java
@@ -43,13 +43,13 @@
public class MarkerBasedEarlyConflictDetectionRunnable implements Runnable {
private static final Logger LOG = LoggerFactory.getLogger(MarkerBasedEarlyConflictDetectionRunnable.class);
- private MarkerHandler markerHandler;
- private String markerDir;
- private String basePath;
- private HoodieStorage storage;
- private AtomicBoolean hasConflict;
- private long maxAllowableHeartbeatIntervalInMs;
- private Set completedCommits;
+ private final MarkerHandler markerHandler;
+ private final String markerDir;
+ private final String basePath;
+ private final HoodieStorage storage;
+ private final AtomicBoolean hasConflict;
+ private final long maxAllowableHeartbeatIntervalInMs;
+ private final Set completedCommits;
private final boolean checkCommitConflict;
public MarkerBasedEarlyConflictDetectionRunnable(AtomicBoolean hasConflict, MarkerHandler markerHandler,
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java
index 4204a06876f64..2a296f0290519 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java
@@ -86,7 +86,7 @@ public class MarkerDirState implements Serializable {
private final Object markerCreationProcessingLock = new Object();
// Early conflict detection strategy if enabled
private final Option conflictDetectionStrategy;
- private transient HoodieEngineContext hoodieEngineContext;
+ private final transient HoodieEngineContext hoodieEngineContext;
// Last underlying file index used, for finding the next file index
// in a round-robin fashion
private int lastFileIndexUsed = -1;
@@ -378,6 +378,6 @@ private void flushMarkersToFile(int markerFileIndex) {
closeQuietly(bufferedWriter);
closeQuietly(outputStream);
}
- LOG.debug(markersFilePath.toString() + " written in " + timer.endTimer() + " ms");
+ LOG.debug(markersFilePath + " written in " + timer.endTimer() + " ms");
}
}
\ No newline at end of file
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
index 05b902087a660..73569dc672843 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HDFSParquetImporter.java
@@ -62,8 +62,6 @@
import java.util.Collections;
import java.util.List;
-import scala.Tuple2;
-
/**
* Loads data from Parquet Sources.
*
@@ -179,7 +177,7 @@ protected JavaRDD> buildHoodieRecordsForImport
job.getConfiguration())
// To reduce large number of tasks.
.coalesce(16 * cfg.parallelism).map(entry -> {
- GenericRecord genericRecord = ((Tuple2) entry)._2();
+ GenericRecord genericRecord = ((scala.Tuple2) entry)._2();
Object partitionField = genericRecord.get(cfg.partitionKey);
if (partitionField == null) {
throw new HoodieIOException("partition key is missing. :" + cfg.partitionKey);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java
index b09c056c6fe57..d0dca29f43c72 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java
@@ -47,12 +47,12 @@ public class HoodieCleaner {
/**
* Spark context.
*/
- private transient JavaSparkContext jssc;
+ private final transient JavaSparkContext jssc;
/**
* Bag of properties with source, hoodie client, key generator etc.
*/
- private TypedProperties props;
+ private final TypedProperties props;
public HoodieCleaner(Config cfg, JavaSparkContext jssc) {
this(cfg, jssc, UtilHelpers.buildProperties(jssc.hadoopConfiguration(), cfg.propsFilePath, cfg.configs));
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java
index 4cf59d58e2e1f..fbe3d5c702bbb 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCompactor.java
@@ -55,7 +55,7 @@ public class HoodieCompactor {
public static final String SCHEDULE_AND_EXECUTE = "scheduleandexecute";
private final Config cfg;
private transient FileSystem fs;
- private TypedProperties props;
+ private final TypedProperties props;
private final JavaSparkContext jsc;
private HoodieTableMetaClient metaClient;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableValidator.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableValidator.java
index 5d7f9dae90a0f..f8d0d4c49ab40 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableValidator.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableValidator.java
@@ -110,7 +110,7 @@ public class HoodieDataTableValidator implements Serializable {
// Properties with source, hoodie client, key generator etc.
private TypedProperties props;
- private HoodieTableMetaClient metaClient;
+ private final HoodieTableMetaClient metaClient;
protected transient Option asyncDataTableValidateService;
@@ -351,7 +351,7 @@ public void doDataTableValidation() {
if (!danglingFiles.isEmpty()) {
LOG.error("Data table validation failed due to extra files found for completed commits " + danglingFiles.size());
- danglingFiles.forEach(entry -> LOG.error("Dangling file: " + entry.toString()));
+ danglingFiles.forEach(entry -> LOG.error("Dangling file: " + entry));
finalResult = false;
if (!cfg.ignoreFailed) {
throw new HoodieValidationException("Data table validation failed due to dangling files " + danglingFiles.size());
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDropPartitionsTool.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDropPartitionsTool.java
index f82c8149e3649..e81d15ff67988 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDropPartitionsTool.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDropPartitionsTool.java
@@ -114,7 +114,7 @@ public class HoodieDropPartitionsTool implements Serializable {
// config
private final Config cfg;
// Properties with source, hoodie client, key generator etc.
- private TypedProperties props;
+ private final TypedProperties props;
private final HoodieTableMetaClient metaClient;
@@ -286,7 +286,7 @@ public static void main(String[] args) {
try {
tool.run();
} catch (Throwable throwable) {
- LOG.error("Fail to run deleting table partitions for " + cfg.toString(), throwable);
+ LOG.error("Fail to run deleting table partitions for " + cfg, throwable);
} finally {
jsc.stop();
}
@@ -384,7 +384,7 @@ private void syncHive(HiveSyncConfig hiveSyncConfig) {
+ "). Hive metastore URL :"
+ hiveSyncConfig.getStringOrDefault(HiveSyncConfigHolder.HIVE_URL)
+ ", basePath :" + cfg.basePath);
- LOG.info("Hive Sync Conf => " + hiveSyncConfig.toString());
+ LOG.info("Hive Sync Conf => " + hiveSyncConfig);
FileSystem fs = HadoopFSUtils.getFs(cfg.basePath, jsc.hadoopConfiguration());
HiveConf hiveConf = new HiveConf();
if (!StringUtils.isNullOrEmpty(cfg.hiveHMSUris)) {
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieIndexer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieIndexer.java
index 7071e57c93096..90d958d540cdb 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieIndexer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieIndexer.java
@@ -93,7 +93,7 @@ public class HoodieIndexer {
static final String DROP_INDEX = "dropindex";
private final HoodieIndexer.Config cfg;
- private TypedProperties props;
+ private final TypedProperties props;
private final JavaSparkContext jsc;
private final HoodieTableMetaClient metaClient;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
index 8a8bf31a8ee9d..951411c2f8205 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
@@ -200,11 +200,11 @@ public class HoodieMetadataTableValidator implements Serializable {
private static final Logger LOG = LoggerFactory.getLogger(HoodieMetadataTableValidator.class);
// Spark context
- private transient JavaSparkContext jsc;
+ private final transient JavaSparkContext jsc;
// config
- private Config cfg;
+ private final Config cfg;
// Properties with source, hoodie client, key generator etc.
- private TypedProperties props;
+ private final TypedProperties props;
private final HoodieTableMetaClient metaClient;
@@ -212,7 +212,7 @@ public class HoodieMetadataTableValidator implements Serializable {
private final String taskLabels;
- private List throwables = new ArrayList<>();
+ private final List throwables = new ArrayList<>();
public HoodieMetadataTableValidator(JavaSparkContext jsc, Config cfg) {
this.jsc = jsc;
@@ -1444,11 +1444,8 @@ private boolean assertBaseFilesEquality(FileSlice fileSlice1, FileSlice fileSlic
return baseFile1.getFileName().equals(baseFile2.getFileName()) && baseFile1.getFileId().equals(baseFile2.getFileId())
&& baseFile1.getFileSize() == baseFile2.getFileSize();
} else {
- if (!fileSlice1.getBaseFile().isPresent() == fileSlice2.getBaseFile().isPresent()) {
- return false;
- }
+ return fileSlice1.getBaseFile().isPresent() == fileSlice2.getBaseFile().isPresent();
}
- return true;
}
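Note: the original else-branch returns true exactly when the two base-file presence flags agree, so the collapsed one-liner must compare them without negating either side. A minimal check of the equivalence (boolean names are illustrative):

    for (boolean a : new boolean[] {true, false}) {
      for (boolean b : new boolean[] {true, false}) {
        boolean original = !(!a == b);   // "if (!a == b) return false; return true;" collapsed
        boolean simplified = (a == b);
        assert original == simplified;
      }
    }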
/**
@@ -1770,7 +1767,7 @@ public List> getSortedColumnStatsList(Stri
.collect(Collectors.toList());
} else {
FileFormatUtils formatUtils = HoodieIOFactory.getIOFactory(metaClient.getStorage()).getFileFormatUtils(HoodieFileFormat.PARQUET);
- return fileNames.stream().flatMap(filename -> {
+ return (List>) fileNames.stream().flatMap(filename -> {
if (filename.endsWith(HoodieFileFormat.PARQUET.getFileExtension())) {
return formatUtils.readColumnStatsFromMetadata(
metaClient.getStorage(),
@@ -1781,14 +1778,12 @@ public List> getSortedColumnStatsList(Stri
StoragePath storagePartitionPath = new StoragePath(metaClient.getBasePath(), partitionPath);
String filePath = new StoragePath(storagePartitionPath, filename).toString();
try {
- return ((List>) getLogFileColumnRangeMetadata(filePath, metaClient, allColumnNameList, Option.of(readerSchema),
+ return getLogFileColumnRangeMetadata(filePath, metaClient, allColumnNameList, Option.of(readerSchema),
metadataConfig.getMaxReaderBufferSize())
.stream()
// We need to convert file path and use only the file name instead of the complete file path
.map(m -> (HoodieColumnRangeMetadata) HoodieColumnRangeMetadata.create(filename, m.getColumnName(), m.getMinValue(), m.getMaxValue(),
- m.getNullCount(), m.getValueCount(), m.getTotalSize(), m.getTotalUncompressedSize()))
- .collect(Collectors.toList()))
- .stream();
+ m.getNullCount(), m.getValueCount(), m.getTotalSize(), m.getTotalUncompressedSize()));
} catch (IOException e) {
throw new HoodieIOException(String.format("Failed to get column stats for file: %s", filePath), e);
}
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieRepairTool.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieRepairTool.java
index 5bc5867be6e7c..982434a1abf63 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieRepairTool.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieRepairTool.java
@@ -146,7 +146,7 @@ public class HoodieRepairTool {
// Repair config
private final Config cfg;
// Properties with source, hoodie client, key generator etc.
- private TypedProperties props;
+ private final TypedProperties props;
// Spark context
private final HoodieEngineContext context;
private final HoodieTableMetaClient metaClient;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieTTLJob.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieTTLJob.java
index 7ab5b5747c6f0..fb608c9cb6681 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieTTLJob.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieTTLJob.java
@@ -47,7 +47,7 @@ public class HoodieTTLJob {
private final Config cfg;
private final TypedProperties props;
private final JavaSparkContext jsc;
- private HoodieTableMetaClient metaClient;
+ private final HoodieTableMetaClient metaClient;
public HoodieTTLJob(JavaSparkContext jsc, Config cfg) {
this(jsc, cfg, UtilHelpers.buildProperties(jsc.hadoopConfiguration(), cfg.propsFilePath, cfg.configs),
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java
index 0df69a685e0fc..ce271fe113f86 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java
@@ -109,11 +109,11 @@ public class TableSizeStats implements Serializable {
private static final String[] FILE_SIZE_UNITS = {"B", "KB", "MB", "GB", "TB"};
// Spark context
- private transient JavaSparkContext jsc;
+ private final transient JavaSparkContext jsc;
// config
- private Config cfg;
+ private final Config cfg;
// Properties with source, hoodie client, key generator etc.
- private TypedProperties props;
+ private final TypedProperties props;
public TableSizeStats(JavaSparkContext jsc, Config cfg) {
this.jsc = jsc;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/callback/kafka/HoodieWriteCommitKafkaCallback.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/callback/kafka/HoodieWriteCommitKafkaCallback.java
index 75cc9df86d3a8..78bd0ae0228f2 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/callback/kafka/HoodieWriteCommitKafkaCallback.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/callback/kafka/HoodieWriteCommitKafkaCallback.java
@@ -49,9 +49,9 @@ public class HoodieWriteCommitKafkaCallback implements HoodieWriteCommitCallback
private static final Logger LOG = LoggerFactory.getLogger(HoodieWriteCommitKafkaCallback.class);
- private HoodieConfig hoodieConfig;
- private String bootstrapServers;
- private String topic;
+ private final HoodieConfig hoodieConfig;
+ private final String bootstrapServers;
+ private final String topic;
public HoodieWriteCommitKafkaCallback(HoodieWriteConfig config) {
this.hoodieConfig = config;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/checkpointing/KafkaConnectHdfsProvider.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/checkpointing/KafkaConnectHdfsProvider.java
index 8e8af55a3c563..273bbb12f2284 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/checkpointing/KafkaConnectHdfsProvider.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/checkpointing/KafkaConnectHdfsProvider.java
@@ -38,7 +38,7 @@
* Documentation: https://docs.confluent.io/current/connect/kafka-connect-hdfs/index.html
*/
public class KafkaConnectHdfsProvider extends InitialCheckPointProvider {
- private static String FILENAME_SEPARATOR = "[\\+\\.]";
+ private static final String FILENAME_SEPARATOR = "[\\+\\.]";
public KafkaConnectHdfsProvider(TypedProperties props) {
super(props);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java
index cca551870d975..5e9dd5e8c40f6 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java
@@ -70,7 +70,7 @@ public class TimelineServerPerf implements Serializable {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(TimelineServerPerf.class);
private final Config cfg;
- private transient TimelineService timelineServer;
+ private final transient TimelineService timelineServer;
private final boolean useExternalTimelineServer;
private String hostAddr;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/JdbcbasedSchemaProvider.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/JdbcbasedSchemaProvider.java
index ef76a797ca508..9cbc57539e535 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/JdbcbasedSchemaProvider.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/schema/JdbcbasedSchemaProvider.java
@@ -41,7 +41,7 @@
*/
public class JdbcbasedSchemaProvider extends SchemaProvider {
private Schema sourceSchema;
- private Map options = new HashMap<>();
+ private final Map options = new HashMap<>();
public JdbcbasedSchemaProvider(TypedProperties props, JavaSparkContext jssc) {
super(props, jssc);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java
index 754c9e9fe607f..263dcf1e117cc 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java
@@ -59,7 +59,7 @@ public enum SourceType {
protected transient SparkSession sparkSession;
protected transient Option sourceProfileSupplier;
protected int writeTableVersion;
- private transient SchemaProvider overriddenSchemaProvider;
+ private final transient SchemaProvider overriddenSchemaProvider;
private final SourceType sourceType;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/debezium/DebeziumSource.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/debezium/DebeziumSource.java
index e4dc95e26ecc7..acb6e925681a5 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/debezium/DebeziumSource.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/debezium/DebeziumSource.java
@@ -192,7 +192,7 @@ public static Dataset convertDateColumns(Dataset dataset, Schema schem
}
}).map(Field::name).collect(Collectors.toList());
- LOG.info("Date fields: " + dateFields.toString());
+ LOG.info("Date fields: {}", dateFields);
for (String dateCol : dateFields) {
dataset = dataset.withColumn(dateCol, functions.col(dateCol).cast(DataTypes.DateType));
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/CloudDataFetcher.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/CloudDataFetcher.java
index e217d4612ad60..7789e9b3bb811 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/CloudDataFetcher.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/CloudDataFetcher.java
@@ -54,10 +54,10 @@ public class CloudDataFetcher implements Serializable {
private static final String EMPTY_STRING = "";
- private transient TypedProperties props;
- private transient JavaSparkContext sparkContext;
- private transient SparkSession sparkSession;
- private transient CloudObjectsSelectorCommon cloudObjectsSelectorCommon;
+ private final transient TypedProperties props;
+ private final transient JavaSparkContext sparkContext;
+ private final transient SparkSession sparkSession;
+ private final transient CloudObjectsSelectorCommon cloudObjectsSelectorCommon;
private static final Logger LOG = LoggerFactory.getLogger(CloudDataFetcher.class);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DatePartitionPathSelector.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DatePartitionPathSelector.java
index 7458e3a59e00d..432e2994b799a 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DatePartitionPathSelector.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/DatePartitionPathSelector.java
@@ -71,7 +71,7 @@
*/
public class DatePartitionPathSelector extends DFSPathSelector {
- private static volatile Logger LOG = LoggerFactory.getLogger(DatePartitionPathSelector.class);
+ private static final Logger LOG = LoggerFactory.getLogger(DatePartitionPathSelector.class);
private final String dateFormat;
private final int datePartitionDepth;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java
index e09e1c7be812d..d9b940097dbd3 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/KafkaOffsetGen.java
@@ -104,12 +104,10 @@ public static Map strToOffsets(String checkpointStr) {
public static String offsetsToStr(OffsetRange[] ranges) {
// merge the ranges by partition to maintain one offset range map to one topic partition.
ranges = mergeRangesByTopicPartition(ranges);
- StringBuilder sb = new StringBuilder();
// at least 1 partition will be present.
- sb.append(ranges[0].topic() + ",");
- sb.append(Arrays.stream(ranges).map(r -> String.format("%s:%d", r.partition(), r.untilOffset()))
- .collect(Collectors.joining(",")));
- return sb.toString();
+ return ranges[0].topic() + ","
+ + Arrays.stream(ranges).map(r -> String.format("%s:%d", r.partition(), r.untilOffset()))
+ .collect(Collectors.joining(","));
}
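Note: the simplified offsetsToStr keeps the existing checkpoint layout, the topic followed by comma-separated partition:untilOffset pairs. A minimal sketch with made-up topic and offsets:

    // Illustrative values only; mirrors the concatenation in the method above.
    String topic = "impressions";
    long[][] partitionToUntilOffset = {{0, 300}, {1, 250}};
    String checkpoint = topic + ","
        + java.util.Arrays.stream(partitionToUntilOffset)
            .map(p -> String.format("%d:%d", p[0], p[1]))
            .collect(java.util.stream.Collectors.joining(","));
    // checkpoint -> "impressions,0:300,1:250"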
/**
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/gcs/MessageValidity.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/gcs/MessageValidity.java
index 27aa906619817..a1c73d95800ef 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/gcs/MessageValidity.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/gcs/MessageValidity.java
@@ -50,6 +50,6 @@ public Option getDescription() {
* */
public enum ProcessingDecision {
DO_PROCESS,
- DO_SKIP;
+ DO_SKIP
}
}
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java
index cc26154555474..2d3c7be8030e3 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java
@@ -54,9 +54,9 @@
import java.io.Serializable;
import java.util.HashMap;
-import static org.apache.hudi.common.table.HoodieTableConfig.TIMELINE_HISTORY_PATH;
import static org.apache.hudi.common.table.HoodieTableConfig.PARTITION_METAFILE_USE_BASE_FORMAT;
import static org.apache.hudi.common.table.HoodieTableConfig.POPULATE_META_FIELDS;
+import static org.apache.hudi.common.table.HoodieTableConfig.TIMELINE_HISTORY_PATH;
import static org.apache.hudi.common.table.HoodieTableConfig.TIMELINE_TIMEZONE;
import static org.apache.hudi.config.HoodieWriteConfig.PRECOMBINE_FIELD_NAME;
import static org.apache.hudi.config.HoodieWriteConfig.WRITE_TABLE_VERSION;
@@ -83,12 +83,12 @@ public class BootstrapExecutor implements Serializable {
/**
* Schema provider that supplies the command for reading the input and writing out the target table.
*/
- private transient SchemaProvider schemaProvider;
+ private final transient SchemaProvider schemaProvider;
/**
* Spark context.
*/
- private transient JavaSparkContext jssc;
+ private final transient JavaSparkContext jssc;
/**
* Bag of properties with source, hoodie client, key generator etc.
@@ -108,9 +108,9 @@ public class BootstrapExecutor implements Serializable {
/**
* FileSystem instance.
*/
- private transient FileSystem fs;
+ private final transient FileSystem fs;
- private String bootstrapBasePath;
+ private final String bootstrapBasePath;
/**
* Bootstrap Executor.
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java
index 92c4a843c94ab..6dc7d20cb8a34 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java
@@ -70,12 +70,12 @@
*/
public class HoodieMultiTableStreamer {
- private static Logger logger = LoggerFactory.getLogger(HoodieMultiTableStreamer.class);
+ private static final Logger LOG = LoggerFactory.getLogger(HoodieMultiTableStreamer.class);
- private List tableExecutionContexts;
- private transient JavaSparkContext jssc;
- private Set successTables;
- private Set failedTables;
+ private final List tableExecutionContexts;
+ private final transient JavaSparkContext jssc;
+ private final Set successTables;
+ private final Set failedTables;
public HoodieMultiTableStreamer(Config config, JavaSparkContext jssc) throws IOException {
this.tableExecutionContexts = new ArrayList<>();
@@ -119,7 +119,7 @@ private void checkIfTableConfigFileExists(String configFolder, FileSystem fs, St
//commonProps are passed as parameter which contain table to config file mapping
private void populateTableExecutionContextList(TypedProperties properties, String configFolder, FileSystem fs, Config config) throws IOException {
List tablesToBeIngested = getTablesToBeIngested(properties);
- logger.info("tables to be ingested via MultiTableDeltaStreamer : " + tablesToBeIngested);
+ LOG.info("tables to be ingested via MultiTableDeltaStreamer : " + tablesToBeIngested);
TableExecutionContext executionContext;
for (String table : tablesToBeIngested) {
String[] tableWithDatabase = table.split("\\.");
@@ -270,11 +270,11 @@ public static void main(String[] args) throws IOException {
}
if (config.enableHiveSync) {
- logger.warn("--enable-hive-sync will be deprecated in a future release; please use --enable-sync instead for Hive syncing");
+ LOG.warn("--enable-hive-sync will be deprecated in a future release; please use --enable-sync instead for Hive syncing");
}
if (config.targetTableName != null) {
- logger.warn(String.format("--target-table is deprecated and will be removed in a future release due to it's useless;"
+ LOG.warn(String.format("--target-table is deprecated and will be removed in a future release due to it's useless;"
+ " please use %s to configure multiple target tables", HoodieStreamerConfig.TABLES_TO_BE_INGESTED.key()));
}
@@ -458,14 +458,14 @@ public void sync() {
new HoodieStreamer(context.getConfig(), jssc, Option.ofNullable(context.getProperties())).sync();
successTables.add(Helpers.getTableWithDatabase(context));
} catch (Exception e) {
- logger.error("error while running MultiTableDeltaStreamer for table: " + context.getTableName(), e);
+ LOG.error("error while running MultiTableDeltaStreamer for table: " + context.getTableName(), e);
failedTables.add(Helpers.getTableWithDatabase(context));
}
}
- logger.info("Ingestion was successful for topics: " + successTables);
+ LOG.info("Ingestion was successful for topics: " + successTables);
if (!failedTables.isEmpty()) {
- logger.info("Ingestion failed for topics: " + failedTables);
+ LOG.info("Ingestion failed for topics: " + failedTables);
}
}
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java
index f784a7dba6dbd..5169b9a87c2b0 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java
@@ -655,12 +655,12 @@ public static class StreamSyncService extends HoodieIngestionService {
/**
* Schema provider that supplies the command for reading the input and writing out the target table.
*/
- private transient SchemaProvider schemaProvider;
+ private final transient SchemaProvider schemaProvider;
/**
* Spark Session.
*/
- private transient SparkSession sparkSession;
+ private final transient SparkSession sparkSession;
/**
* Spark context Wrapper.
@@ -668,9 +668,9 @@ public static class StreamSyncService extends HoodieIngestionService {
private final transient HoodieSparkEngineContext hoodieSparkContext;
- private transient HoodieStorage storage;
+ private final transient HoodieStorage storage;
- private transient Configuration hiveConf;
+ private final transient Configuration hiveConf;
/**
* Bag of properties with source, hoodie client, key generator etc.
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
index 12c869666e1f7..b5d457dcd7594 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
@@ -175,12 +175,12 @@ public class StreamSync implements Serializable, Closeable {
/**
* Source to pull deltas from.
*/
- private transient SourceFormatAdapter formatAdapter;
+ private final transient SourceFormatAdapter formatAdapter;
/**
* User Provided Schema Provider.
*/
- private transient SchemaProvider userProvidedSchemaProvider;
+ private final transient SchemaProvider userProvidedSchemaProvider;
/**
* Schema provider that supplies the command for reading the input and writing out the target table.
@@ -190,14 +190,14 @@ public class StreamSync implements Serializable, Closeable {
/**
* Allows transforming source to target table before writing.
*/
- private transient Option transformer;
+ private final transient Option transformer;
- private String keyGenClassName;
+ private final String keyGenClassName;
/**
* Filesystem used.
*/
- private transient HoodieStorage storage;
+ private final transient HoodieStorage storage;
/**
* Spark context Wrapper.
@@ -207,12 +207,12 @@ public class StreamSync implements Serializable, Closeable {
/**
* Spark Session.
*/
- private transient SparkSession sparkSession;
+ private final transient SparkSession sparkSession;
/**
* Hive Config.
*/
- private transient Configuration conf;
+ private final transient Configuration conf;
/**
* Bag of properties with source, hoodie client, key generator etc.
@@ -224,7 +224,7 @@ public class StreamSync implements Serializable, Closeable {
/**
* Callback when write client is instantiated.
*/
- private transient Function onInitializingHoodieWriteClient;
+ private final transient Function onInitializingHoodieWriteClient;
/**
* Timeline with completed commits, including both .commit and .deltacommit.
@@ -1278,7 +1278,7 @@ public Option getClusteringInstantOpt() {
class WriteClientWriteResult {
private Map> partitionToReplacedFileIds = Collections.emptyMap();
- private JavaRDD writeStatusRDD;
+ private final JavaRDD writeStatusRDD;
public WriteClientWriteResult(JavaRDD writeStatusRDD) {
this.writeStatusRDD = writeStatusRDD;
From 9f473c4830cdb1c038628228f250eb5e76343303 Mon Sep 17 00:00:00 2001
From: vinoth chandar
Date: Thu, 19 Dec 2024 13:49:00 -0800
Subject: [PATCH 2/4] More minor fixes
---
.../java/org/apache/hudi/table/action/commit/BucketInfo.java | 3 +--
.../java/org/apache/hudi/table/action/commit/SmallFile.java | 3 +--
.../table/read/HoodiePositionBasedFileGroupRecordBuffer.java | 2 --
3 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java
index 898522b8b4437..a59bd81218edd 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/BucketInfo.java
@@ -50,11 +50,10 @@ public String getPartitionPath() {
@Override
public String toString() {
- String sb = "BucketInfo {" + "bucketType=" + bucketType + ", "
+ return "BucketInfo {" + "bucketType=" + bucketType + ", "
+ "fileIdPrefix=" + fileIdPrefix + ", "
+ "partitionPath=" + partitionPath
+ '}';
- return sb;
}
@Override
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java
index 9be817d96fb46..ef6bef69dbd07 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/SmallFile.java
@@ -32,9 +32,8 @@ public class SmallFile implements Serializable {
@Override
public String toString() {
- String sb = "SmallFile {" + "location=" + location + ", "
+ return "SmallFile {" + "location=" + location + ", "
+ "sizeBytes=" + sizeBytes
+ '}';
- return sb;
}
}
\ No newline at end of file
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/read/HoodiePositionBasedFileGroupRecordBuffer.java b/hudi-common/src/main/java/org/apache/hudi/common/table/read/HoodiePositionBasedFileGroupRecordBuffer.java
index 274f9aea55c91..6d2ca28d7bbf0 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/read/HoodiePositionBasedFileGroupRecordBuffer.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/read/HoodiePositionBasedFileGroupRecordBuffer.java
@@ -270,8 +270,6 @@ protected boolean shouldSkip(T record, String keyFieldName, boolean isFullKey, S
// When the record key matches with one of the keys or key prefixes, can not skip.
return (!isFullKey || !keys.contains(recordKey))
&& (isFullKey || keys.stream().noneMatch(recordKey::startsWith));
-
- // Otherwise, this record is not needed.
}
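Note: the deleted trailing comment was unreachable text after the return; the retained expression skips a record only when it matches neither a full key (full-key mode) nor any key prefix (prefix mode). A minimal sketch of the prefix-mode case (key values are made up):

    java.util.Set<String> keys = java.util.Collections.singleton("key_001");
    String recordKey = "key_001_0042";
    boolean isFullKey = false;                  // keys hold prefixes here
    boolean skip = (!isFullKey || !keys.contains(recordKey))
        && (isFullKey || keys.stream().noneMatch(recordKey::startsWith));
    // recordKey starts with "key_001", so skip -> false and the record is kept.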
/**
From 1bc5fc079a30ac7404edb1e730e6369d25a2bae9 Mon Sep 17 00:00:00 2001
From: vinoth chandar
Date: Thu, 19 Dec 2024 16:14:42 -0800
Subject: [PATCH 3/4] Fix CI compilation failure
---
.../src/main/java/org/apache/hudi/common/util/ParquetUtils.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java
index fc84ea2585a93..74bd60b398e67 100644
--- a/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java
+++ b/hudi-hadoop-common/src/main/java/org/apache/hudi/common/util/ParquetUtils.java
@@ -275,7 +275,7 @@ public List> readColumnStatsFromMetadata(H
.filter(f -> columnList.contains(f.getPath().toDotString()))
.map(columnChunkMetaData -> {
Statistics stats = columnChunkMetaData.getStatistics();
- return HoodieColumnRangeMetadata.create(
+ return (HoodieColumnRangeMetadata) HoodieColumnRangeMetadata.create(
filePath.getName(),
columnChunkMetaData.getPath().toDotString(),
convertToNativeJavaType(
From 04751a31a606ddd3b6c2c19c50ee962ff96647bb Mon Sep 17 00:00:00 2001
From: vinoth chandar
Date: Fri, 20 Dec 2024 07:42:10 -0800
Subject: [PATCH 4/4] Move away from `final transient` back to `transient`
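
Rationale (sketch): these helpers are Serializable, and default Java serialization leaves transient fields at null after deserialization; keeping them non-final allows the state to be re-created lazily, which a final modifier would forbid. Minimal illustration (class and field names are made up):

    class CachingHelper implements java.io.Serializable {
      private transient java.util.Map<String, String> cache;   // deliberately not final

      java.util.Map<String, String> getCache() {
        if (cache == null) {            // re-initialize after deserialization
          cache = new java.util.HashMap<>();
        }
        return cache;
      }
    }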
---
.../aws/sync/AWSGlueCatalogSyncClient.java | 6 ++--
.../hudi/async/AsyncClusteringService.java | 2 +-
.../apache/hudi/async/HoodieAsyncService.java | 6 ++--
.../client/transaction/lock/LockManager.java | 2 +-
.../index/hbase/SparkHoodieHBaseIndex.java | 35 ++++++++++---------
.../SparkBootstrapCommitActionExecutor.java | 2 +-
.../apache/hudi/common/model/BaseFile.java | 2 +-
.../timeline/service/TimelineService.java | 6 ++--
.../service/handlers/MarkerHandler.java | 2 +-
.../handlers/marker/MarkerDirState.java | 2 +-
.../apache/hudi/utilities/HoodieCleaner.java | 2 +-
.../HoodieMetadataTableValidator.java | 2 +-
.../apache/hudi/utilities/TableSizeStats.java | 2 +-
.../utilities/perf/TimelineServerPerf.java | 2 +-
.../apache/hudi/utilities/sources/Source.java | 2 +-
.../sources/helpers/CloudDataFetcher.java | 8 ++---
.../utilities/streamer/BootstrapExecutor.java | 6 ++--
.../streamer/HoodieMultiTableStreamer.java | 2 +-
.../utilities/streamer/HoodieStreamer.java | 10 +++---
.../hudi/utilities/streamer/StreamSync.java | 14 ++++----
20 files changed, 58 insertions(+), 57 deletions(-)
diff --git a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
index 91857edc1784d..1609c846d0e89 100644
--- a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
+++ b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
@@ -71,6 +71,7 @@
import software.amazon.awssdk.services.glue.model.GetPartitionsRequest;
import software.amazon.awssdk.services.glue.model.GetPartitionsResponse;
import software.amazon.awssdk.services.glue.model.GetTableRequest;
+import software.amazon.awssdk.services.glue.model.KeySchemaElement;
import software.amazon.awssdk.services.glue.model.PartitionIndex;
import software.amazon.awssdk.services.glue.model.PartitionIndexDescriptor;
import software.amazon.awssdk.services.glue.model.PartitionInput;
@@ -690,7 +691,7 @@ public void managePartitionIndexes(String tableName) throws ExecutionException,
GetPartitionIndexesRequest indexesRequest = GetPartitionIndexesRequest.builder().databaseName(databaseName).tableName(tableName).build();
GetPartitionIndexesResponse existingIdxsResp = awsGlue.getPartitionIndexes(indexesRequest).get();
for (PartitionIndexDescriptor idsToDelete : existingIdxsResp.partitionIndexDescriptorList()) {
- LOG.warn("Dropping partition index: " + idsToDelete.indexName());
+ LOG.warn("Dropping partition index: {}", idsToDelete.indexName());
DeletePartitionIndexRequest idxToDelete = DeletePartitionIndexRequest.builder()
.databaseName(databaseName).tableName(tableName).indexName(idsToDelete.indexName()).build();
awsGlue.deletePartitionIndex(idxToDelete).get();
@@ -712,12 +713,11 @@ public void managePartitionIndexes(String tableName) throws ExecutionException,
// for each existing index remove if not relevant anymore
boolean indexesChanges = false;
for (PartitionIndexDescriptor existingIdx: existingIdxsResp.partitionIndexDescriptorList()) {
- List idxColumns = existingIdx.keys().stream().map(key -> key.name()).collect(Collectors.toList());
+ List idxColumns = existingIdx.keys().stream().map(KeySchemaElement::name).collect(Collectors.toList());
boolean toBeRemoved = true;
for (List neededIdx : partitionsIndexNeeded) {
if (neededIdx.equals(idxColumns)) {
toBeRemoved = false;
- break;
}
}
if (toBeRemoved) {
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncClusteringService.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncClusteringService.java
index ea3983e56ee8a..7b3884235010c 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncClusteringService.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/AsyncClusteringService.java
@@ -47,7 +47,7 @@ public abstract class AsyncClusteringService extends HoodieAsyncTableService {
private static final Logger LOG = LoggerFactory.getLogger(AsyncClusteringService.class);
private final int maxConcurrentClustering;
protected transient HoodieEngineContext context;
- private final transient BaseClusterer clusteringClient;
+ private transient BaseClusterer clusteringClient;
public AsyncClusteringService(HoodieEngineContext context, BaseHoodieWriteClient writeClient) {
this(context, writeClient, false);
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/HoodieAsyncService.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/HoodieAsyncService.java
index 410c04e83cb3e..989babfdcb7a1 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/HoodieAsyncService.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/async/HoodieAsyncService.java
@@ -57,11 +57,11 @@ public abstract class HoodieAsyncService implements Serializable {
// Run in daemon mode
private final boolean runInDaemonMode;
// Queue to hold pending compaction/clustering instants
- private final transient BlockingQueue pendingInstants = new LinkedBlockingQueue<>();
+ private transient BlockingQueue pendingInstants = new LinkedBlockingQueue<>();
// Mutex lock for synchronized access to pendingInstants queue
- private final transient ReentrantLock queueLock = new ReentrantLock();
+ private transient ReentrantLock queueLock = new ReentrantLock();
// Condition instance to use with the queueLock
- private final transient Condition consumed = queueLock.newCondition();
+ private transient Condition consumed = queueLock.newCondition();
protected HoodieAsyncService() {
this(false);
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/LockManager.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/LockManager.java
index bf751e4037b2a..57ed6df45afb4 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/LockManager.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/lock/LockManager.java
@@ -53,7 +53,7 @@ public class LockManager implements Serializable, AutoCloseable {
private final int maxRetries;
private final long maxWaitTimeInMs;
private final RetryHelper lockRetryHelper;
- private final transient HoodieLockMetrics metrics;
+ private transient HoodieLockMetrics metrics;
private volatile LockProvider lockProvider;
public LockManager(HoodieWriteConfig writeConfig, HoodieStorage storage) {
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/hbase/SparkHoodieHBaseIndex.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/hbase/SparkHoodieHBaseIndex.java
index 28a14c7f04493..b6be1df61f1d3 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/hbase/SparkHoodieHBaseIndex.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/index/hbase/SparkHoodieHBaseIndex.java
@@ -114,12 +114,13 @@ public class SparkHoodieHBaseIndex extends HoodieIndex {
private static final byte[] PARTITION_PATH_COLUMN = getUTF8Bytes("partition_path");
private static final Logger LOG = LoggerFactory.getLogger(SparkHoodieHBaseIndex.class);
- private static Connection hbaseConnection = null;
+ private static Connection HBASE_CONNECTION = null;
+ private static Thread SHUTDOWN_THREAD;
+
private HBaseIndexQPSResourceAllocator hBaseIndexQPSResourceAllocator = null;
private int maxQpsPerRegionServer;
private long totalNumInserts;
private int numWriteStatusWithInserts;
- private static Thread shutdownThread;
/**
* multiPutBatchSize will be computed and re-set in updateLocation if
@@ -197,15 +198,15 @@ private Connection getHBaseConnection() {
* exits.
*/
private void addShutDownHook() {
- if (null == shutdownThread) {
- shutdownThread = new Thread(() -> {
+ if (null == SHUTDOWN_THREAD) {
+ SHUTDOWN_THREAD = new Thread(() -> {
try {
- hbaseConnection.close();
+ HBASE_CONNECTION.close();
} catch (Exception e) {
// fail silently for any sort of exception
}
});
- Runtime.getRuntime().addShutdownHook(shutdownThread);
+ Runtime.getRuntime().addShutdownHook(SHUTDOWN_THREAD);
}
}
@@ -244,12 +245,12 @@ private Function2>, Iterator> taggedRecords = new ArrayList<>();
- try (HTable hTable = (HTable) hbaseConnection.getTable(TableName.valueOf(tableName))) {
+ try (HTable hTable = (HTable) HBASE_CONNECTION.getTable(TableName.valueOf(tableName))) {
List statements = new ArrayList<>();
List currentBatchOfRecords = new LinkedList<>();
// Do the tagging.
@@ -340,15 +341,15 @@ private Function2, Iterator> updateL
List writeStatusList = new ArrayList<>();
// Grab the global HBase connection
synchronized (SparkHoodieHBaseIndex.class) {
- if (hbaseConnection == null || hbaseConnection.isClosed()) {
- hbaseConnection = getHBaseConnection();
+ if (HBASE_CONNECTION == null || HBASE_CONNECTION.isClosed()) {
+ HBASE_CONNECTION = getHBaseConnection();
}
}
final long startTimeForPutsTask = DateTime.now().getMillis();
LOG.info("startTimeForPutsTask for this task: " + startTimeForPutsTask);
final RateLimiter limiter = RateLimiter.create(multiPutBatchSize, TimeUnit.SECONDS);
- try (BufferedMutator mutator = hbaseConnection.getBufferedMutator(TableName.valueOf(tableName))) {
+ try (BufferedMutator mutator = HBASE_CONNECTION.getBufferedMutator(TableName.valueOf(tableName))) {
while (statusIterator.hasNext()) {
WriteStatus writeStatus = statusIterator.next();
List mutations = new ArrayList<>();
@@ -596,13 +597,13 @@ public boolean rollbackCommit(String instantTime) {
}
synchronized (SparkHoodieHBaseIndex.class) {
- if (hbaseConnection == null || hbaseConnection.isClosed()) {
- hbaseConnection = getHBaseConnection();
+ if (HBASE_CONNECTION == null || HBASE_CONNECTION.isClosed()) {
+ HBASE_CONNECTION = getHBaseConnection();
}
}
final RateLimiter limiter = RateLimiter.create(multiPutBatchSize, TimeUnit.SECONDS);
- try (HTable hTable = (HTable) hbaseConnection.getTable(TableName.valueOf(tableName));
- BufferedMutator mutator = hbaseConnection.getBufferedMutator(TableName.valueOf(tableName))) {
+ try (HTable hTable = (HTable) HBASE_CONNECTION.getTable(TableName.valueOf(tableName));
+ BufferedMutator mutator = HBASE_CONNECTION.getBufferedMutator(TableName.valueOf(tableName))) {
Long rollbackTime = TimelineUtils.parseDateFromInstantTime(instantTime).getTime();
Long currentTime = new Date().getTime();
Scan scan = new Scan();
@@ -682,7 +683,7 @@ public boolean isImplicitWithStorage() {
}
public void setHbaseConnection(Connection hbaseConnection) {
- SparkHoodieHBaseIndex.hbaseConnection = hbaseConnection;
+ SparkHoodieHBaseIndex.HBASE_CONNECTION = hbaseConnection;
}
/**
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/bootstrap/SparkBootstrapCommitActionExecutor.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/bootstrap/SparkBootstrapCommitActionExecutor.java
index 4002d805945ac..9b842064000f6 100644
--- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/bootstrap/SparkBootstrapCommitActionExecutor.java
+++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/action/bootstrap/SparkBootstrapCommitActionExecutor.java
@@ -83,7 +83,7 @@ public class SparkBootstrapCommitActionExecutor
private static final Logger LOG = LoggerFactory.getLogger(SparkBootstrapCommitActionExecutor.class);
protected String bootstrapSchema = null;
- private final transient HoodieStorage bootstrapSourceStorage;
+ private transient HoodieStorage bootstrapSourceStorage;
public SparkBootstrapCommitActionExecutor(HoodieSparkEngineContext context,
HoodieWriteConfig config,
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/BaseFile.java b/hudi-common/src/main/java/org/apache/hudi/common/model/BaseFile.java
index 1c60cab875e27..01d1c6531001e 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/model/BaseFile.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/model/BaseFile.java
@@ -32,7 +32,7 @@ public class BaseFile implements Serializable {
private static final long serialVersionUID = 1L;
- private final transient StoragePathInfo pathInfo;
+ private transient StoragePathInfo pathInfo;
private final String fullPath;
protected final String fileName;
private long fileLen;
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java
index 77db3af5c0c8c..481fa02d1e6c9 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/TimelineService.java
@@ -55,10 +55,10 @@ public class TimelineService {
private int serverPort;
private final Config timelineServerConf;
private final StorageConfiguration<?> storageConf;
- private final transient HoodieEngineContext context;
- private final transient HoodieStorage storage;
+ private transient HoodieEngineContext context;
+ private transient HoodieStorage storage;
private transient Javalin app = null;
- private final transient FileSystemViewManager fsViewsManager;
+ private transient FileSystemViewManager fsViewsManager;
private transient RequestHandler requestHandler;
public int getServerPort() {
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/MarkerHandler.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/MarkerHandler.java
index 8d99e85c1ef22..ccddbed18eb24 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/MarkerHandler.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/MarkerHandler.java
@@ -96,7 +96,7 @@ public class MarkerHandler extends Handler {
private final MarkerCreationDispatchingRunnable markerCreationDispatchingRunnable;
private final Object firstCreationRequestSeenLock = new Object();
private final Object earlyConflictDetectionLock = new Object();
- private final transient HoodieEngineContext hoodieEngineContext;
+ private transient HoodieEngineContext hoodieEngineContext;
private ScheduledFuture<?> dispatchingThreadFuture;
private boolean firstCreationRequestSeen;
private String currentMarkerDir = null;
diff --git a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java
index 2a296f0290519..cd4ec27f10b6d 100644
--- a/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java
+++ b/hudi-timeline-service/src/main/java/org/apache/hudi/timeline/service/handlers/marker/MarkerDirState.java
@@ -86,7 +86,7 @@ public class MarkerDirState implements Serializable {
private final Object markerCreationProcessingLock = new Object();
// Early conflict detection strategy if enabled
private final Option<TimelineServerBasedDetectionStrategy> conflictDetectionStrategy;
- private final transient HoodieEngineContext hoodieEngineContext;
+ private transient HoodieEngineContext hoodieEngineContext;
// Last underlying file index used, for finding the next file index
// in a round-robin fashion
private int lastFileIndexUsed = -1;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java
index d0dca29f43c72..8ccb745a6204b 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieCleaner.java
@@ -47,7 +47,7 @@ public class HoodieCleaner {
/**
* Spark context.
*/
- private final transient JavaSparkContext jssc;
+ private transient JavaSparkContext jssc;
/**
* Bag of properties with source, hoodie client, key generator etc.
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
index 951411c2f8205..771cba994f492 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
@@ -200,7 +200,7 @@ public class HoodieMetadataTableValidator implements Serializable {
private static final Logger LOG = LoggerFactory.getLogger(HoodieMetadataTableValidator.class);
// Spark context
- private final transient JavaSparkContext jsc;
+ private transient JavaSparkContext jsc;
// config
private final Config cfg;
// Properties with source, hoodie client, key generator etc.
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java
index ce271fe113f86..90267c8c0b8d8 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/TableSizeStats.java
@@ -109,7 +109,7 @@ public class TableSizeStats implements Serializable {
private static final String[] FILE_SIZE_UNITS = {"B", "KB", "MB", "GB", "TB"};
// Spark context
- private final transient JavaSparkContext jsc;
+ private transient JavaSparkContext jsc;
// config
private final Config cfg;
// Properties with source, hoodie client, key generator etc.
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java
index 5e9dd5e8c40f6..cca551870d975 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/perf/TimelineServerPerf.java
@@ -70,7 +70,7 @@ public class TimelineServerPerf implements Serializable {
private static final long serialVersionUID = 1L;
private static final Logger LOG = LoggerFactory.getLogger(TimelineServerPerf.class);
private final Config cfg;
- private final transient TimelineService timelineServer;
+ private transient TimelineService timelineServer;
private final boolean useExternalTimelineServer;
private String hostAddr;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java
index 263dcf1e117cc..754c9e9fe607f 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/Source.java
@@ -59,7 +59,7 @@ public enum SourceType {
protected transient SparkSession sparkSession;
protected transient Option<SourceProfileSupplier> sourceProfileSupplier;
protected int writeTableVersion;
- private final transient SchemaProvider overriddenSchemaProvider;
+ private transient SchemaProvider overriddenSchemaProvider;
private final SourceType sourceType;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/CloudDataFetcher.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/CloudDataFetcher.java
index 7789e9b3bb811..e217d4612ad60 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/CloudDataFetcher.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/sources/helpers/CloudDataFetcher.java
@@ -54,10 +54,10 @@ public class CloudDataFetcher implements Serializable {
private static final String EMPTY_STRING = "";
- private final transient TypedProperties props;
- private final transient JavaSparkContext sparkContext;
- private final transient SparkSession sparkSession;
- private final transient CloudObjectsSelectorCommon cloudObjectsSelectorCommon;
+ private transient TypedProperties props;
+ private transient JavaSparkContext sparkContext;
+ private transient SparkSession sparkSession;
+ private transient CloudObjectsSelectorCommon cloudObjectsSelectorCommon;
private static final Logger LOG = LoggerFactory.getLogger(CloudDataFetcher.class);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java
index 2d3c7be8030e3..a2aaeeb89e241 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/BootstrapExecutor.java
@@ -83,12 +83,12 @@ public class BootstrapExecutor implements Serializable {
/**
* Schema provider that supplies the command for reading the input and writing out the target table.
*/
- private final transient SchemaProvider schemaProvider;
+ private transient SchemaProvider schemaProvider;
/**
* Spark context.
*/
- private final transient JavaSparkContext jssc;
+ private transient JavaSparkContext jssc;
/**
* Bag of properties with source, hoodie client, key generator etc.
@@ -108,7 +108,7 @@ public class BootstrapExecutor implements Serializable {
/**
* FileSystem instance.
*/
- private final transient FileSystem fs;
+ private transient FileSystem fs;
private final String bootstrapBasePath;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java
index 6dc7d20cb8a34..0b5ed0ff26c49 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieMultiTableStreamer.java
@@ -73,7 +73,7 @@ public class HoodieMultiTableStreamer {
private static final Logger LOG = LoggerFactory.getLogger(HoodieMultiTableStreamer.class);
private final List<TableExecutionContext> tableExecutionContexts;
- private final transient JavaSparkContext jssc;
+ private transient JavaSparkContext jssc;
private final Set<String> successTables;
private final Set<String> failedTables;
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java
index 5169b9a87c2b0..a23687d5c7d7a 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/HoodieStreamer.java
@@ -119,7 +119,7 @@ public class HoodieStreamer implements Serializable {
public static final String CHECKPOINT_KEY = HoodieWriteConfig.STREAMER_CHECKPOINT_KEY;
public static final String CHECKPOINT_RESET_KEY = STREAMER_CHECKPOINT_RESET_KEY_V1;
- protected final transient Config cfg;
+ protected transient Config cfg;
/**
* NOTE: These properties are already consolidated w/ CLI provided config-overrides.
@@ -655,12 +655,12 @@ public static class StreamSyncService extends HoodieIngestionService {
/**
* Schema provider that supplies the command for reading the input and writing out the target table.
*/
- private final transient SchemaProvider schemaProvider;
+ private transient SchemaProvider schemaProvider;
/**
* Spark Session.
*/
- private final transient SparkSession sparkSession;
+ private transient SparkSession sparkSession;
/**
* Spark context Wrapper.
@@ -668,9 +668,9 @@ public static class StreamSyncService extends HoodieIngestionService {
private final transient HoodieSparkEngineContext hoodieSparkContext;
- private final transient HoodieStorage storage;
+ private transient HoodieStorage storage;
- private final transient Configuration hiveConf;
+ private transient Configuration hiveConf;
/**
* Bag of properties with source, hoodie client, key generator etc.
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
index b5d457dcd7594..8413b1fac3183 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/streamer/StreamSync.java
@@ -175,12 +175,12 @@ public class StreamSync implements Serializable, Closeable {
/**
* Source to pull deltas from.
*/
- private final transient SourceFormatAdapter formatAdapter;
+ private transient SourceFormatAdapter formatAdapter;
/**
* User Provided Schema Provider.
*/
- private final transient SchemaProvider userProvidedSchemaProvider;
+ private transient SchemaProvider userProvidedSchemaProvider;
/**
* Schema provider that supplies the command for reading the input and writing out the target table.
@@ -190,19 +190,19 @@ public class StreamSync implements Serializable, Closeable {
/**
* Allows transforming source to target table before writing.
*/
- private final transient Option<Transformer> transformer;
+ private transient Option<Transformer> transformer;
private final String keyGenClassName;
/**
* Filesystem used.
*/
- private final transient HoodieStorage storage;
+ private transient HoodieStorage storage;
/**
* Spark context Wrapper.
*/
- private final transient HoodieSparkEngineContext hoodieSparkContext;
+ private transient HoodieSparkEngineContext hoodieSparkContext;
/**
* Spark Session.
@@ -212,7 +212,7 @@ public class StreamSync implements Serializable, Closeable {
/**
* Hive Config.
*/
- private final transient Configuration conf;
+ private transient Configuration conf;
/**
* Bag of properties with source, hoodie client, key generator etc.
@@ -224,7 +224,7 @@ public class StreamSync implements Serializable, Closeable {
/**
* Callback when write client is instantiated.
*/
- private final transient Function<SparkRDDWriteClient, Boolean> onInitializingHoodieWriteClient;
+ private transient Function<SparkRDDWriteClient, Boolean> onInitializingHoodieWriteClient;
/**
* Timeline with completed commits, including both .commit and .deltacommit.