Partition spooled pages while encoding data to segments #26013

Open · wants to merge 1 commit into master
OutputSpoolingOperatorFactory.java

@@ -59,6 +59,7 @@
 import static io.trino.operator.OutputSpoolingOperatorFactory.OutputSpoolingOperator.State.NEEDS_INPUT;
 import static io.trino.operator.SpoolingController.Mode.INLINE;
 import static io.trino.operator.SpoolingController.Mode.SPOOL;
+import static io.trino.server.protocol.spooling.SpooledMetadataBlockSerde.serialize;
 import static io.trino.server.protocol.spooling.SpoolingSessionProperties.getInitialSegmentSize;
 import static io.trino.server.protocol.spooling.SpoolingSessionProperties.getMaxSegmentSize;
 import static java.util.Objects.requireNonNull;
@@ -148,6 +149,10 @@ static class OutputSpoolingOperator

     private final SpoolingController controller;
     private final ZoneId clientZoneId;
+    private final AtomicLong spooledSegmentsCount = new AtomicLong();
+    private final AtomicLong inlinedSegmentsCount = new AtomicLong();
[Review comment · Member, on lines +152 to +153]
those could be added in a separate commit
+    private final long maxSegmentSize;
+    private final long minSegmentSize;
 
     enum State
     {
@@ -179,13 +184,16 @@ public OutputSpoolingOperator(OperatorContext operatorContext, QueryDataEncoder
                 new OperatorSpoolingController(
                         getInitialSegmentSize(operatorContext.getSession()).toBytes(),
                         getMaxSegmentSize(operatorContext.getSession()).toBytes()));
+
+        this.minSegmentSize = getInitialSegmentSize(operatorContext.getSession()).toBytes();
+        this.maxSegmentSize = getMaxSegmentSize(operatorContext.getSession()).toBytes();
         this.aggregatedMemoryContext = operatorContext.newAggregateUserMemoryContext();
         this.queryDataEncoder = requireNonNull(queryDataEncoder, "queryDataEncoder is null");
         this.spoolingManager = requireNonNull(spoolingManager, "spoolingManager is null");
         this.buffer = PageBuffer.create();
         this.localMemoryContext = aggregatedMemoryContext.newLocalMemoryContext(OutputSpoolingOperator.class.getSimpleName());
 
-        operatorContext.setInfoSupplier(new OutputSpoolingInfoSupplier(spoolingTiming, controller, inlinedEncodedBytes, spooledEncodedBytes));
+        operatorContext.setInfoSupplier(new OutputSpoolingInfoSupplier(spoolingTiming, controller, inlinedEncodedBytes, spooledEncodedBytes, inlinedSegmentsCount, spooledSegmentsCount));
     }

     @Override
@@ -209,13 +217,13 @@ public void addInput(Page page)
         outputPage = switch (controller.nextMode(page)) {
             case SPOOL -> {
                 buffer.add(page);
-                yield outputBuffer(false);
+                yield spoolOrInline(false);
             }
             case BUFFER -> {
                 buffer.add(page);
                 yield null;
             }
-            case INLINE -> inline(List.of(page));
+            case INLINE -> serialize(inline(List.of(page)));
         };
 
         if (outputPage != null) {
@@ -243,7 +251,7 @@ public Page getOutput()
     public void finish()
     {
         if (state == NEEDS_INPUT) {
-            outputPage = outputBuffer(true);
+            outputPage = spoolOrInline(true);
             if (outputPage != null) {
                 state = HAS_LAST_OUTPUT;
                 controller.finish();
@@ -260,59 +268,73 @@ public boolean isFinished()
         return state == FINISHED;
     }
 
-    private Page outputBuffer(boolean lastPage)
+    private Page spoolOrInline(boolean lastPage)
     {
         if (buffer.isEmpty()) {
             return null;
         }
+        List<List<Page>> partitions = SpoolingPagePartitioner.partition(buffer.removeAll(), maxSegmentSize);
+        ImmutableList.Builder<SpooledMetadataBlock> spooledMetadataBuilder = ImmutableList.builderWithExpectedSize(partitions.size());
+
+        for (int i = 0; i < partitions.size(); i++) {
+            boolean isLastPartition = i == partitions.size() - 1;
+            List<Page> partition = partitions.get(i);
+            long rows = reduce(partition, Page::getPositionCount);
+            long size = reduce(partition, Page::getSizeInBytes);
+
+            if (lastPage && isLastPartition && size < SPOOLING_THRESHOLD) {
+                // If the last partition is small enough, inline it to save the overhead of spooling
+                spooledMetadataBuilder.addAll(inline(partition));
+                continue;
+            }
+
+            if (!lastPage && isLastPartition && size < minSegmentSize) {
+                // Add remaining pages to the buffer to be spooled or inlined later
+                buffer.addAll(partition);
+                continue;
+            }
 
-        if (lastPage && buffer.getSize() < SPOOLING_THRESHOLD) {
-            // If the buffer is small enough, inline it to save the overhead of spooling
-            return inline(buffer.removeAll());
-        }
-
-        return spool(buffer.removeAll());
-    }
-
-    private Page spool(List<Page> pages)
-    {
-        long rows = reduce(pages, Page::getPositionCount);
-        long size = reduce(pages, Page::getSizeInBytes);
-        SpooledSegmentHandle segmentHandle = spoolingManager.create(new SpoolingContext(
-                queryDataEncoder.encoding(),
-                operatorContext.getDriverContext().getSession().getQueryId(),
-                rows,
-                size));
-
-        OperationTimer overallTimer = new OperationTimer(false);
-        try (OutputStream output = spoolingManager.createOutputStream(segmentHandle)) {
-            DataAttributes attributes = queryDataEncoder.encodeTo(output, pages)
-                    .toBuilder()
-                    .set(ROWS_COUNT, rows)
-                    .set(EXPIRES_AT, ZonedDateTime.ofInstant(segmentHandle.expirationTime(), clientZoneId).toLocalDateTime().toString())
-                    .build();
-            spooledEncodedBytes.addAndGet(attributes.get(SEGMENT_SIZE, Integer.class));
-            // This page is small (hundreds of bytes) so there is no point in tracking its memory usage
-            return SpooledMetadataBlock.forSpooledLocation(spoolingManager.location(segmentHandle), attributes).serialize();
-        }
-        catch (IOException e) {
-            throw new UncheckedIOException(e);
-        }
-        finally {
-            overallTimer.end(spoolingTiming);
-        }
-    }
+            // Spool the partition as it is large enough
+            SpooledSegmentHandle segmentHandle = spoolingManager.create(new SpoolingContext(
+                    queryDataEncoder.encoding(),
+                    operatorContext.getDriverContext().getSession().getQueryId(),
+                    rows,
+                    size));
+
+            OperationTimer overallTimer = new OperationTimer(false);
+            try (OutputStream output = spoolingManager.createOutputStream(segmentHandle)) {
+                spooledSegmentsCount.incrementAndGet();
+                DataAttributes attributes = queryDataEncoder.encodeTo(output, partition)
+                        .toBuilder()
+                        .set(ROWS_COUNT, rows)
+                        .set(EXPIRES_AT, ZonedDateTime.ofInstant(segmentHandle.expirationTime(), clientZoneId).toLocalDateTime().toString())
+                        .build();
+                spooledEncodedBytes.addAndGet(attributes.get(SEGMENT_SIZE, Integer.class));
+                // This page is small (hundreds of bytes) so there is no point in tracking its memory usage
+                spooledMetadataBuilder.add(SpooledMetadataBlock.forSpooledLocation(spoolingManager.location(segmentHandle), attributes));
+            }
+            catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+            finally {
+                overallTimer.end(spoolingTiming);
+            }
+        }
+
+        return serialize(spooledMetadataBuilder.build());
     }

-    private Page inline(List<Page> pages)
+    private List<SpooledMetadataBlock> inline(List<Page> pages)
     {
+        inlinedSegmentsCount.incrementAndGet();
         OperationTimer overallTimer = new OperationTimer(false);
         try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
             DataAttributes attributes = queryDataEncoder.encodeTo(output, pages)
                     .toBuilder()
                     .set(ROWS_COUNT, reduce(pages, Page::getPositionCount))
                     .build();
             inlinedEncodedBytes.addAndGet(attributes.get(SEGMENT_SIZE, Integer.class));
-            return SpooledMetadataBlock.forInlineData(attributes, output.toByteArray()).serialize();
+            return ImmutableList.of(SpooledMetadataBlock.forInlineData(attributes, output.toByteArray()));
         }
         catch (IOException e) {
             throw new UncheckedIOException(e);
@@ -363,6 +385,11 @@ public synchronized void add(Page page)
         buffer.add(page);
     }
 
+    public synchronized void addAll(List<Page> pages)
+    {
+        buffer.addAll(pages);
+    }
+
     public boolean isEmpty()
     {
         return buffer.isEmpty();
@@ -387,7 +414,9 @@ private record OutputSpoolingInfoSupplier(
         OperationTiming spoolingTiming,
         SpoolingController controller,
         AtomicLong inlinedEncodedBytes,
-        AtomicLong spooledEncodedBytes)
+        AtomicLong spooledEncodedBytes,
+        AtomicLong inlinedSegmentsCount,
+        AtomicLong spooledSegmentsCount)
         implements Supplier<OutputSpoolingInfo>
     {
         private OutputSpoolingInfoSupplier
@@ -396,6 +425,8 @@ private record OutputSpoolingInfoSupplier(
             requireNonNull(controller, "controller is null");
             requireNonNull(inlinedEncodedBytes, "inlinedEncodedBytes is null");
             requireNonNull(spooledEncodedBytes, "spooledEncodedBytes is null");
+            requireNonNull(inlinedSegmentsCount, "inlinedSegmentsCount is null");
+            requireNonNull(spooledSegmentsCount, "spooledSegmentsCount is null");
         }

     @Override
@@ -408,10 +439,12 @@ public OutputSpoolingInfo get()
                 succinctDuration(spoolingTiming.getWallNanos(), NANOSECONDS),
                 succinctDuration(spoolingTiming.getCpuNanos(), NANOSECONDS),
                 inlined.pages(),
+                inlinedSegmentsCount.get(),
                 inlined.positions(),
                 inlined.size(),
                 inlinedEncodedBytes.get(),
                 spooled.pages(),
+                spooledSegmentsCount.get(),
                 spooled.positions(),
                 spooled.size(),
                 spooledEncodedBytes.get());
@@ -422,10 +455,12 @@ public record OutputSpoolingInfo(
         Duration spoolingWallTime,
         Duration spoolingCpuTime,
         long inlinedPages,
+        long inlinedSegments,
         long inlinedPositions,
         long inlinedRawBytes,
         long inlinedEncodedBytes,
         long spooledPages,
+        long spooledSegments,
         long spooledPositions,
         long spooledRawBytes,
         long spooledEncodedBytes)
@@ -444,10 +479,12 @@ public OutputSpoolingInfo mergeWith(OutputSpoolingInfo other)
                 succinctDuration(spoolingWallTime.toMillis() + other.spoolingWallTime().toMillis(), MILLISECONDS),
                 succinctDuration(spoolingCpuTime.toMillis() + other.spoolingCpuTime().toMillis(), MILLISECONDS),
                 inlinedPages + other.inlinedPages(),
+                inlinedSegments + other.inlinedSegments,
                 inlinedPositions + other.inlinedPositions,
                 inlinedRawBytes + other.inlinedRawBytes,
                 inlinedEncodedBytes + other.inlinedEncodedBytes,
                 spooledPages + other.spooledPages,
+                spooledSegments + other.spooledSegments,
                 spooledPositions + other.spooledPositions,
                 spooledRawBytes + other.spooledRawBytes,
                 spooledEncodedBytes + other.spooledEncodedBytes);
@@ -472,10 +509,12 @@ public String toString()
.add("spoolingWallTime", spoolingWallTime)
.add("spoolingCpuTime", spoolingCpuTime)
.add("inlinedPages", inlinedPages)
.add("inlinedSegments", inlinedSegments)
.add("inlinedPositions", inlinedPositions)
.add("inlinedRawBytes", inlinedRawBytes)
.add("inlinedEncodedBytes", inlinedEncodedBytes)
.add("spooledPages", spooledPages)
.add("spooledSegments", spooledSegments)
.add("spooledPositions", spooledPositions)
.add("spooledRawBytes", spooledRawBytes)
.add("spooledEncodedBytes", spooledEncodedBytes)
SpoolingPagePartitioner.java (new file)

@@ -0,0 +1,113 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.trino.operator;

import com.google.common.collect.ImmutableList;
import io.trino.spi.Page;

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

import static com.google.common.base.Verify.verify;
import static io.trino.execution.buffer.PageSplitterUtil.splitPage;
import static java.lang.Math.clamp;

public class SpoolingPagePartitioner
{
    static final double LOWER_BOUND = 0.05; // accept partitions at most 5% under the target size
    static final double UPPER_BOUND = 0.1; // and at most 10% over it

    private SpoolingPagePartitioner() {}

    public static List<List<Page>> partition(List<Page> pages, long targetSize)
[Review thread on this method]
Member: why operate on multiple pages?
Author: We buffer multiple pages.
Member: I get it - but why not partition page by page? Passing multiple pages makes the interface more complex. Let me read deeper.
    {
        Deque<Page> queue = new ArrayDeque<>(pages);
        List<Page> currentPartition = new ArrayList<>();
        ImmutableList.Builder<List<Page>> partitions = ImmutableList.builder();

        while (!queue.isEmpty()) {
            Page currentPage = queue.removeFirst();

            long remainingSize = targetSize - size(currentPartition);
            verify(remainingSize >= 0, "Current partition size %s is larger than target size %s", size(currentPartition), targetSize);

            if (currentPage.getSizeInBytes() < remainingSize) {
                currentPartition.add(currentPage);

                if (withinThreshold(size(currentPartition), targetSize)) {
                    partitions.add(ImmutableList.copyOf(currentPartition));
                    currentPartition.clear();
                }

                continue;
            }

            List<Page> currentPartitioned = new ArrayList<>(takeFromHead(currentPage, remainingSize, targetSize));
            currentPartition.add(currentPartitioned.removeFirst());

            // Add the remaining split pages back to the queue in the original order
            currentPartitioned.reversed().forEach(queue::addFirst);

            if (withinThreshold(size(currentPartition), targetSize)) {
                partitions.add(ImmutableList.copyOf(currentPartition));
                currentPartition.clear();
            }
        }

        // If there are any remaining pages in the current partition, add them as a final partition
        if (!currentPartition.isEmpty()) {
            partitions.add(ImmutableList.copyOf(currentPartition));
        }

        return partitions.build();
    }

    private static boolean withinThreshold(long size, long targetSize)
    {
        return size >= targetSize * (1 - LOWER_BOUND) && size <= targetSize * (1 + UPPER_BOUND);
    }

    private static List<Page> takeFromHead(Page page, long targetHeadSize, long tailSplitSize)
    {
        verify(page.getSizeInBytes() >= targetHeadSize, "Page size %s must be at least the head size %s", page.getSizeInBytes(), targetHeadSize);
        ImmutableList.Builder<Page> builder = ImmutableList.builder();

        int positions = positionsWithBytes(page, targetHeadSize);
        builder.add(page.getRegion(0, positions));

        if (positions == page.getPositionCount()) {
            return builder.build();
        }
        builder.addAll(splitPage(page.getRegion(positions, page.getPositionCount() - positions), tailSplitSize));
        return builder.build();
    }

    private static long averageSizePerPosition(Page page)
    {
        return clamp(page.getSizeInBytes() / (long) page.getPositionCount(), 1, Integer.MAX_VALUE);
    }

    private static int positionsWithBytes(Page page, long bytes)
    {
        long positions = bytes / averageSizePerPosition(page);
        return clamp(positions, 1, page.getPositionCount());
    }

    private static long size(List<Page> pages)
    {
        return pages.stream().mapToLong(Page::getSizeInBytes).sum();
    }
}
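
For intuition, a hypothetical walk-through (not part of the PR). The pageOfSize helper is assumed, since constructing real Trino Page instances requires block builders, and the class is assumed to live in the same package as SpoolingPagePartitioner; the point is the shape of the result: pages larger than the remaining budget are split by estimating positions from average bytes per position, and each emitted partition lands in the [95%, 110%] window around the target.

```java
import io.trino.spi.Page;

import java.util.List;

final class SpoolingPagePartitionerExample
{
    private SpoolingPagePartitionerExample() {}

    static List<List<Page>> example()
    {
        // Three ~3 MB pages buffered, 4 MB target segment size
        List<Page> buffered = List.of(pageOfSize(3_000_000), pageOfSize(3_000_000), pageOfSize(3_000_000));
        List<List<Page>> partitions = SpoolingPagePartitioner.partition(buffered, 4_000_000);

        // Expected shape: [[3 MB, ~1 MB head], [~2 MB tail, ~2 MB head], [~1 MB remainder]]
        // The first two partitions fall inside the accepted window [3.8 MB, 4.4 MB];
        // the undersized remainder comes back as the final partition, and
        // spoolOrInline decides whether to re-buffer, inline, or spool it.
        return partitions;
    }

    // Hypothetical helper: produces a Page whose getSizeInBytes() is roughly the given value
    private static Page pageOfSize(long bytes)
    {
        throw new UnsupportedOperationException("illustrative only");
    }
}
```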
SpooledMetadataBlock.java

@@ -16,7 +16,6 @@
 import io.airlift.slice.Slice;
 import io.airlift.slice.Slices;
 import io.trino.client.spooling.DataAttributes;
-import io.trino.spi.Page;
 import io.trino.spi.spool.SpooledLocation;
 import io.trino.spi.spool.SpooledLocation.CoordinatorLocation;
 import io.trino.spi.spool.SpooledLocation.DirectLocation;
@@ -32,11 +31,6 @@ public sealed interface SpooledMetadataBlock
 {
     DataAttributes attributes();
 
-    default Page serialize()
-    {
-        return SpooledMetadataBlockSerde.serialize(this);
-    }
-
     static SpooledMetadataBlock forInlineData(DataAttributes attributes, byte[] data)
     {
         return new Inlined(attributes, Slices.wrappedBuffer(data));
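
The net effect of removing the default serialize() (a sketch for illustration; block and blocks are hypothetical variables): serialization moves from a per-block instance method to the static SpooledMetadataBlockSerde.serialize over a whole batch, so a single output Page can now carry the metadata for several spooled or inlined segments.

```java
// Before this PR: every SpooledMetadataBlock serialized itself into its own Page
Page page = block.serialize();

// After: a whole batch is serialized at once, as in
// spoolOrInline(): return serialize(spooledMetadataBuilder.build());
Page page = SpooledMetadataBlockSerde.serialize(blocks); // blocks: List<SpooledMetadataBlock>
```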