Skip updating partition stats when adding empty stats partitions to Glue #25970

Merged · 3 commits · Jun 26, 2025

@@ -1106,6 +1106,7 @@ public void addPartitions(String databaseName, String tableName, List<PartitionW

// statistics are created after partitions because it is not clear if ordering matters in Glue
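// partitions whose statistics are empty are skipped, so no redundant statistics update calls are sent to Glue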
var createStatisticsTasks = partitionsWithStatistics.stream()
.filter(partitionWithStatistics -> partitionWithStatistics.getStatistics() != PartitionStatistics.empty())
Member:
Does this impact cases where we actually want statistics to be reset to empty?
E.g. insert overwrite on a partition with the Hive statistics collection flag disabled.

Contributor Author:
The PartitionStatistics contains HiveBasicStatistics, so it won't be equal to empty().
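
For illustration, a minimal sketch of why an insert-overwritten partition keeps non-empty statistics; the builder and constructor shapes below are assumptions based on recent Trino sources, not taken from this PR:

// Hypothetical sketch: an overwritten partition still carries basic statistics
// (file count, row count, data sizes) even when column statistics collection is
// disabled, so it does not equal PartitionStatistics.empty() and the new filter
// does not drop its statistics update task.
PartitionStatistics empty = PartitionStatistics.empty();
PartitionStatistics overwritten = PartitionStatistics.builder()
        .setBasicStatistics(new HiveBasicStatistics(1, 1, 10, 10)) // fileCount, rowCount, in-memory size, on-disk size
        .build();
// empty.equals(overwritten) is false; only partitions registered with truly empty
// statistics (e.g. by sync_partition_metadata) are filtered out.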

.map(partitionWithStatistics -> createUpdatePartitionStatisticsTasks(
StatisticsUpdateMode.OVERWRITE_ALL,
partitionWithStatistics.getPartition(),
@@ -13,8 +13,10 @@
*/
package io.trino.plugin.hive.metastore.glue;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMultiset;
import com.google.common.collect.Multiset;
import com.google.common.collect.Sets;
import io.airlift.log.Logger;
import io.trino.Session;
import io.trino.plugin.hive.HiveQueryRunner;
@@ -26,29 +28,41 @@
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.parallel.Execution;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Stream;

import static com.google.common.base.Preconditions.checkState;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static com.google.common.collect.ImmutableMultiset.toImmutableMultiset;
import static com.google.common.collect.Iterators.getOnlyElement;
import static io.trino.plugin.hive.TestingHiveUtils.getConnectorService;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.CREATE_PARTITIONS;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.CREATE_TABLE;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.DELETE_COLUMN_STATISTICS_FOR_PARTITION;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.DELETE_COLUMN_STATISTICS_FOR_TABLE;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.GET_COLUMN_STATISTICS_FOR_PARTITION;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.GET_COLUMN_STATISTICS_FOR_TABLE;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.GET_DATABASE;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.GET_PARTITION;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.GET_PARTITIONS;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.GET_PARTITION_NAMES;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.GET_TABLE;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.GET_TABLES;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.UPDATE_COLUMN_STATISTICS_FOR_TABLE;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.UPDATE_PARTITION;
import static io.trino.plugin.hive.metastore.glue.GlueMetastoreMethod.UPDATE_TABLE;
import static io.trino.testing.MultisetAssertions.assertMultisetsEqual;
import static io.trino.testing.TestingNames.randomNameSuffix;
import static io.trino.testing.TestingSession.testSessionBuilder;
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
import static org.junit.jupiter.api.parallel.ExecutionMode.SAME_THREAD;

@@ -62,6 +76,7 @@ public class TestHiveGlueMetastoreAccessOperations
private final String testSchema = "test_schema_" + randomNameSuffix();

private GlueMetastoreStats glueStats;
private Path schemaDir;

@Override
protected QueryRunner createQueryRunner()
@@ -77,6 +92,7 @@ protected QueryRunner createQueryRunner()
.setCreateTpchSchemas(false)
.build();
queryRunner.execute("CREATE SCHEMA " + testSchema);
schemaDir = queryRunner.getCoordinator().getBaseDataDir().resolve("hive_data").resolve("glue").resolve(testSchema);
glueStats = getConnectorService(queryRunner, GlueHiveMetastore.class).getStats();
return queryRunner;
}
@@ -87,6 +103,76 @@ public void cleanUpSchema()
getQueryRunner().execute("DROP SCHEMA " + testSchema + " CASCADE");
}

@Test
void testInsertOverwriteStatisticsDisabled()
{
String tableName = "test_insert_overwrite_" + randomNameSuffix();

try {
assertUpdate("CREATE TABLE " + tableName + " (id INT, part INT) WITH (partitioned_by = ARRAY['part'])");
assertUpdate("INSERT INTO " + tableName + " VALUES (1, 1), (2, 1)", 2);

Session insertOverwriteSession = Session.builder(getQueryRunner().getDefaultSession())
.setCatalogSessionProperty("hive", "insert_existing_partitions_behavior", "OVERWRITE")
.setCatalogSessionProperty("hive", "collect_column_statistics_on_write", "false")
.setCatalogSessionProperty("hive", "statistics_enabled", "false")
.build();
assertInvocations(insertOverwriteSession, "INSERT INTO " + tableName + " VALUES (3, 1)",
ImmutableMultiset.<GlueMetastoreMethod>builder()
.add(DELETE_COLUMN_STATISTICS_FOR_PARTITION)
.add(GET_COLUMN_STATISTICS_FOR_PARTITION)
.add(GET_PARTITION)
.add(GET_TABLE)
.add(UPDATE_PARTITION)
.build(),
// We can't disable the partition cache in Glue v2 (see GlueMetastoreModule#createGlueCache),
// so there may be one additional GET_PARTITION call
ImmutableMultiset.<GlueMetastoreMethod>builder()
.add(GET_PARTITION)
.build());
assertQuery("SELECT * FROM " + tableName, "VALUES (3, 1)");
}
finally {
getQueryRunner().execute("DROP TABLE IF EXISTS " + tableName);
}
}

@Test
void testSyncPartitionMetadataProcedure()
throws IOException
{
String tableName = "test_sync_partition_metadata_" + randomNameSuffix();
try {
assertUpdate("CREATE TABLE " + tableName + " (id INT, part INT) WITH (partitioned_by = ARRAY['part'])");
assertUpdate("INSERT INTO " + tableName + " VALUES (1, 1)", 1);
Path tableDir = schemaDir.resolve(tableName);
Path sourcePartitionDir = tableDir.resolve("part=1");
Path sourceFile;
try (Stream<Path> paths = Files.list(sourcePartitionDir)) {
sourceFile = getOnlyElement(paths.iterator());
}

// prepare partition to sync
Path targetPartitionDir = tableDir.resolve("part=2");
Files.createDirectories(targetPartitionDir);
Files.copy(sourceFile, targetPartitionDir.resolve("data"));

// sync_partition_metadata does not call UPDATE_COLUMN_STATISTICS_FOR_PARTITION
assertInvocations("CALL system.sync_partition_metadata('%s', '%s', 'FULL')".formatted(testSchema, tableName),
ImmutableMultiset.<GlueMetastoreMethod>builder()
.add(GET_TABLE)
.add(CREATE_PARTITIONS)
.addCopies(GET_PARTITION_NAMES, 5)
.build());

// the partition is successfully synced
assertQuery("SELECT * FROM " + tableName + " WHERE part = 2", "VALUES (1, 2)");
}
finally {
getQueryRunner().execute("DROP TABLE IF EXISTS " + tableName);
}
}

@Test
public void testUse()
{
@@ -491,6 +577,26 @@ private void assertInvocations(@Language("SQL") String query, Multiset<GlueMetas
}

private void assertInvocations(Session session, @Language("SQL") String query, Multiset<GlueMetastoreMethod> expectedGlueInvocations)
{
assertMultisetsEqual(getActualInvocations(session, query), expectedGlueInvocations);
}

private void assertInvocations(
Session session, @Language("SQL") String query,
Multiset<GlueMetastoreMethod> determinedExpectedGlueInvocations,
Multiset<GlueMetastoreMethod> possibleExpectedGlueInvocations)
{
Multiset<GlueMetastoreMethod> actualInvocations = getActualInvocations(session, query);
if (!mismatchMultisets(determinedExpectedGlueInvocations, actualInvocations).isEmpty()) {
Multiset<GlueMetastoreMethod> expectedGlueInvocations = ImmutableMultiset.<GlueMetastoreMethod>builder()
.addAll(determinedExpectedGlueInvocations)
.addAll(possibleExpectedGlueInvocations)
.build();
assertMultisetsEqual(expectedGlueInvocations, actualInvocations);
}
}

private Multiset<GlueMetastoreMethod> getActualInvocations(Session session, @Language("SQL") String query)
{
Map<GlueMetastoreMethod, Integer> countsBefore = Arrays.stream(GlueMetastoreMethod.values())
.collect(toImmutableMap(Function.identity(), method -> method.getInvocationCount(glueStats)));
@@ -500,9 +606,29 @@ private void assertInvocations(Session session, @Language("SQL") String query, M
Map<GlueMetastoreMethod, Integer> countsAfter = Arrays.stream(GlueMetastoreMethod.values())
.collect(toImmutableMap(Function.identity(), method -> method.getInvocationCount(glueStats)));

Multiset<GlueMetastoreMethod> actualGlueInvocations = Arrays.stream(GlueMetastoreMethod.values())
return Arrays.stream(GlueMetastoreMethod.values())
.collect(toImmutableMultiset(Function.identity(), method -> requireNonNull(countsAfter.get(method)) - requireNonNull(countsBefore.get(method))));
}

assertMultisetsEqual(actualGlueInvocations, expectedGlueInvocations);
private static List<String> mismatchMultisets(Multiset<?> actual, Multiset<?> expected)
{
if (expected.equals(actual)) {
return ImmutableList.of();
}

return Sets.union(expected.elementSet(), actual.elementSet()).stream()
.filter(key -> expected.count(key) != actual.count(key))
.flatMap(key -> {
int expectedCount = expected.count(key);
int actualCount = actual.count(key);
if (actualCount < expectedCount) {
return Stream.of(format("%s more occurrences of %s", expectedCount - actualCount, key));
}
if (actualCount > expectedCount) {
return Stream.of(format("%s fewer occurrences of %s", actualCount - expectedCount, key));
}
return Stream.of();
})
.collect(toImmutableList());
}
}