Commit be59bd0

HIVE-28888: Fix spotbugs issues in hive-storage-api and hive-service-rpc with spotbugs-maven-plugin 4.8.6.6 (#5754)
1 parent: a0465a9

51 files changed: +284 −171 lines


Jenkinsfile

Lines changed: 1 addition & 4 deletions
@@ -251,10 +251,7 @@ git merge origin/target
       }
       stage('Prechecks') {
         def spotbugsProjects = [
-            ":hive-common",
-            ":hive-shims",
             ":hive-storage-api",
-            ":hive-standalone-metastore-common",
             ":hive-service-rpc"
         ]
         sh '''#!/bin/bash
@@ -267,7 +264,7 @@ if [ $n != 0 ]; then
           exit 1
         fi
         '''
-        buildHive("-Pspotbugs -pl " + spotbugsProjects.join(",") + " -am test-compile com.github.spotbugs:spotbugs-maven-plugin:4.0.0:check")
+        buildHive("-Pspotbugs -pl " + spotbugsProjects.join(",") + " -am test-compile com.github.spotbugs:spotbugs-maven-plugin:4.8.6.6:check")
       }
       stage('Compile') {
         buildHive("install -Dtest=noMatches")

pom.xml

Lines changed: 4 additions & 4 deletions
@@ -222,7 +222,7 @@
     <json-path.version>2.9.0</json-path.version>
     <janino.version>3.1.12</janino.version>
     <datasketches.version>1.2.0</datasketches.version>
-    <spotbugs.version>4.0.3</spotbugs.version>
+    <spotbugs.version>4.8.6</spotbugs.version>
     <validation-api.version>1.1.0.Final</validation-api.version>
     <aws-secretsmanager-caching.version>1.0.1</aws-secretsmanager-caching.version>
     <aws-java-sdk.version>1.12.720</aws-java-sdk.version>
@@ -2034,11 +2034,11 @@
       <id>spotbugs</id>
       <build>
         <plugins>
-          <!-- Execute as: com.github.spotbugs:spotbugs-maven-plugin:4.0.0:spotbugs -->
+          <!-- Execute as: com.github.spotbugs:spotbugs-maven-plugin:4.8.6.6:spotbugs -->
           <plugin>
             <groupId>com.github.spotbugs</groupId>
             <artifactId>spotbugs-maven-plugin</artifactId>
-            <version>4.0.0</version>
+            <version>4.8.6.6</version>
             <dependencies>
               <!-- Specify the version of spotbugs -->
               <dependency>
@@ -2061,7 +2061,7 @@
         <plugin>
           <groupId>com.github.spotbugs</groupId>
           <artifactId>spotbugs-maven-plugin</artifactId>
-          <version>4.0.0</version>
+          <version>4.8.6.6</version>
           <configuration>
             <fork>true</fork>
             <maxHeap>2048</maxHeap>

ql/src/java/org/apache/hadoop/hive/ql/ddl/table/info/desc/formatter/TextDescTableFormatter.java

Lines changed: 1 addition & 1 deletion
@@ -292,7 +292,7 @@ private static MaterializationSnapshotFormatter createMaterializationSnapshotFor
         return String.format("SnapshotContext{snapshotId=%d}", snapshotContext.getSnapshotId());
       };
     } else if (snapshot != null && snapshot.getValidTxnList() != null) {
-      ValidTxnWriteIdList validReaderWriteIdList = new ValidTxnWriteIdList(snapshot.getValidTxnList());
+      ValidTxnWriteIdList validReaderWriteIdList = ValidTxnWriteIdList.fromValue(snapshot.getValidTxnList());
       return qualifiedTableName -> {
         ValidWriteIdList writeIdList = validReaderWriteIdList.getTableValidWriteIdList(qualifiedTableName);
         return writeIdList != null ? writeIdList.toString().replace(qualifiedTableName, "") : "Unknown";
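
This change is representative of the pattern repeated throughout the commit: callers move from the String-accepting constructors in hive-storage-api to static factory methods (fromValue here, build for the bloom filters further down). A minimal sketch of the factory-based call pattern, assuming the storage-api package org.apache.hadoop.hive.common used by the files above; the helper class and its parameters are made up for illustration and are not part of this commit:

import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public final class SnapshotWriteIds {
  // Hypothetical helper mirroring the formatter logic above.
  static String describe(String validTxnListValue, String qualifiedTableName) {
    // Previously: new ValidTxnWriteIdList(validTxnListValue)
    ValidTxnWriteIdList txnWriteIds = ValidTxnWriteIdList.fromValue(validTxnListValue);
    ValidWriteIdList writeIds = txnWriteIds.getTableValidWriteIdList(qualifiedTableName);
    return writeIds != null ? writeIds.toString() : "Unknown";
  }
}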

ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java

Lines changed: 1 addition & 1 deletion
@@ -486,7 +486,7 @@ private ValidWriteIdList extractValidWriteIdList() {
     if (currDesc.getTableName() == null || !org.apache.commons.lang3.StringUtils.isBlank(currDesc.getTableName())) {
       String txnString = job.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
       LOG.debug("FetchOperator get writeIdStr: " + txnString);
-      return txnString == null ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString);
+      return txnString == null ? new ValidReaderWriteIdList() : ValidReaderWriteIdList.fromValue(txnString);
     }
     return null; // not fetching from a table directly but from a temp location
   }

ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java

Lines changed: 2 additions & 2 deletions
@@ -347,7 +347,7 @@ private HybridHashTableContainer(float keyCountAdj, int threshold, float loadFac

     if (useBloomFilter) {
       if (newKeyCount <= BLOOM_FILTER_MAX_SIZE) {
-        this.bloom1 = new BloomFilter(newKeyCount);
+        this.bloom1 = BloomFilter.build(newKeyCount);
       } else {
         // To avoid having a huge BloomFilter we need to scale up False Positive Probability
         double fpp = calcFPP(newKeyCount);
@@ -356,7 +356,7 @@ private HybridHashTableContainer(float keyCountAdj, int threshold, float loadFac
           LOG.warn("BloomFilter FPP is greater than 0.5!");
         }
         LOG.info("BloomFilter is using FPP: " + fpp);
-        this.bloom1 = new BloomFilter(newKeyCount, fpp);
+        this.bloom1 = BloomFilter.build(newKeyCount, fpp);
       }
       LOG.info(String.format("Using a bloom-1 filter %d keys of size %d bytes",
           newKeyCount, bloom1.sizeInBytes()));

ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java

Lines changed: 1 addition & 1 deletion
@@ -127,7 +127,7 @@ public Object call() throws IOException {
           BloomKFilter bloomKFilter = BloomKFilter.deserialize(in);
           return bloomKFilter;
         } else {
-          return new BloomKFilter(1);
+          return BloomKFilter.build(1);
         }
       } finally {
         IOUtils.closeStream(in);

ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFBloomFilter.java

Lines changed: 1 addition & 1 deletion
@@ -67,7 +67,7 @@ private static final class Aggregation implements AggregationBuffer {
     BloomKFilter bf;

     public Aggregation(long expectedEntries) {
-      bf = new BloomKFilter(expectedEntries);
+      bf = BloomKFilter.build(expectedEntries);
     }

     @Override

ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java

Lines changed: 2 additions & 2 deletions
@@ -2287,7 +2287,7 @@ public static boolean isRemovedInsertOnlyTable(Set<String> removedSet) {
    */
   public static ValidTxnWriteIdList getValidTxnWriteIdList(Configuration conf) {
     String txnString = conf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
-    return new ValidTxnWriteIdList(txnString);
+    return ValidTxnWriteIdList.fromValue(txnString);
   }

   /**
@@ -2684,7 +2684,7 @@ public static List<Path> getValidDataPaths(Path dataPath, Configuration conf, St
     }

     // If ACID/MM tables, then need to find the valid state wrt to given ValidWriteIdList.
-    ValidWriteIdList validWriteIdList = new ValidReaderWriteIdList(validWriteIdStr);
+    ValidWriteIdList validWriteIdList = ValidReaderWriteIdList.fromValue(validWriteIdStr);
     AcidDirectory acidInfo = AcidUtils.getAcidState(dataPath.getFileSystem(conf), dataPath, conf, validWriteIdList, null,
         false);

ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java

Lines changed: 2 additions & 2 deletions
@@ -786,7 +786,7 @@ static class Context {
           ? AcidOperationalProperties.parseString(txnProperties) : null;

       String value = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
-      writeIdList = value == null ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(value);
+      writeIdList = value == null ? new ValidReaderWriteIdList() : ValidReaderWriteIdList.fromValue(value);
       LOG.info("Context:: " +
           "isAcid: {} " +
           "isVectorMode: {} " +
@@ -2097,7 +2097,7 @@ public RowReader<OrcStruct> getReader(InputSplit inputSplit,

     String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
     ValidWriteIdList validWriteIdList
-        = (txnString == null) ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString);
+        = (txnString == null) ? new ValidReaderWriteIdList() : ValidReaderWriteIdList.fromValue(txnString);
     if (LOG.isDebugEnabled()) {
       LOG.debug("getReader:: Read ValidWriteIdList: " + validWriteIdList.toString()
           + " isTransactionalTable: " + HiveConf.getBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN));

ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcAcidRowBatchReader.java

Lines changed: 5 additions & 4 deletions
@@ -261,7 +261,7 @@ private VectorizedOrcAcidRowBatchReader(JobConf conf, OrcSplit orcSplit, Reporte
     }

     String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
-    this.validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString);
+    this.validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList() : ValidReaderWriteIdList.fromValue(txnString);
     LOG.info("Read ValidWriteIdList: " + this.validWriteIdList.toString()
         + ":" + orcSplit);

@@ -788,7 +788,7 @@ static OrcSplit.OffsetAndBucketProperty computeOffsetAndBucket(

     String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
     ValidWriteIdList validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList() :
-        new ValidReaderWriteIdList(txnString);
+        ValidReaderWriteIdList.fromValue(txnString);

     long rowIdOffset = 0;
     OrcRawRecordMerger.TransactionMetaData syntheticTxnInfo =
@@ -1243,7 +1243,7 @@ static class SortMergedDeleteEventRegistry implements DeleteEventRegistry {
       int bucket = AcidUtils.parseBucketId(orcSplit.getPath());
       String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
       this.validWriteIdList
-          = (txnString == null) ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString);
+          = (txnString == null) ? new ValidReaderWriteIdList() : ValidReaderWriteIdList.fromValue(txnString);
       LOG.debug("Using SortMergedDeleteEventRegistry");
       Map<String, Integer> deltaToAttemptId = AcidUtils.getDeltaToAttemptIdMap(pathToDeltaMetaData, deleteDeltas, bucket);
       OrcRawRecordMerger.Options mergerOptions = new OrcRawRecordMerger.Options().isDeleteReader(true);
@@ -1926,7 +1926,8 @@ public BothWriteIds done(int index) {
       this.testMode = conf.getBoolean(ConfVars.HIVE_IN_TEST.varname, false);
       int bucket = AcidUtils.parseBucketId(orcSplit.getPath());
       String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
-      ValidWriteIdList validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList() : new ValidReaderWriteIdList(txnString);
+      ValidWriteIdList validWriteIdList = (txnString == null) ? new ValidReaderWriteIdList()
+          : ValidReaderWriteIdList.fromValue(txnString);
       LOG.debug("Using ColumnizedDeleteEventRegistry");
       this.sortMerger = new TreeMap<>();
       this.rowIds = null;
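
The same null-guarded txnString handling recurs in FetchOperator, OrcInputFormat, and the three sites in this reader. A hypothetical helper, not part of this commit, sketching how the repeated pattern could be centralized; the class and method names are illustrative only, and the storage-api package org.apache.hadoop.hive.common is assumed as in the diffs above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

public final class WriteIdLists {
  private WriteIdLists() {
  }

  // Reads the serialized write-id list from the configuration, falling back to an empty list
  // when the key is unset, exactly as the call sites above do inline.
  static ValidWriteIdList fromConf(Configuration conf) {
    String txnString = conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY);
    return txnString == null
        ? new ValidReaderWriteIdList()
        : ValidReaderWriteIdList.fromValue(txnString);
  }
}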
