diff --git a/Makefile b/Makefile index 187ee20b..bef1f6a0 100644 --- a/Makefile +++ b/Makefile @@ -9,16 +9,16 @@ TAP_TESTS = 0 OBJS = src/encryption/enc_tuple.o \ src/encryption/enc_aes.o \ -src/access/pg_tde_io.o \ -src/access/pg_tdeam_visibility.o \ +src/access/heapam_visibility.o \ +src/access/heapam_handler.o \ +src/access/heapam.o \ +src/access/heaptoast.o \ +src/access/hio.o \ +src/access/pruneheap.o \ +src/access/rewriteheap.o \ +src/access/vacuumlazy.o \ +src/access/visibilitymap.o \ src/access/pg_tde_tdemap.o \ -src/access/pg_tdeam.o \ -src/access/pg_tdetoast.o \ -src/access/pg_tde_prune.o \ -src/access/pg_tde_vacuumlazy.o \ -src/access/pg_tde_visibilitymap.o \ -src/access/pg_tde_rewrite.o \ -src/access/pg_tdeam_handler.o \ src/transam/pg_tde_xact_handler.o \ src/keyring/keyring_config.o \ src/keyring/keyring_file.o \ diff --git a/meson.build b/meson.build index 65b80052..a9f135ce 100644 --- a/meson.build +++ b/meson.build @@ -5,16 +5,18 @@ jsondep = dependency('json-c') pg_tde_sources = files( 'src/pg_tde.c', 'src/transam/pg_tde_xact_handler.c', + 'src/access/pg_tde_tdemap.c', - 'src/access/pg_tdeam.c', - 'src/access/pg_tdeam_handler.c', - 'src/access/pg_tdeam_visibility.c', - 'src/access/pg_tdetoast.c', - 'src/access/pg_tde_io.c', - 'src/access/pg_tde_prune.c', - 'src/access/pg_tde_rewrite.c', - 'src/access/pg_tde_vacuumlazy.c', - 'src/access/pg_tde_visibilitymap.c', + + 'src/access/heapam_handler.c', + 'src/access/heapam_visibility.c', + 'src/access/heapam.c', + 'src/access/heaptoast.c', + 'src/access/hio.c', + 'src/access/pruneheap.c', + 'src/access/rewriteheap.c', + 'src/access/vacuumlazy.c', + 'src/access/visibilitymap.c', 'src/encryption/enc_tuple.c', 'src/encryption/enc_aes.c', diff --git a/src/access/pg_tdeam.c b/src/access/heapam.c similarity index 93% rename from src/access/pg_tdeam.c rename to src/access/heapam.c index aef070a1..cf209944 100644 --- a/src/access/pg_tdeam.c +++ b/src/access/heapam.c @@ -1,30 +1,30 @@ /*------------------------------------------------------------------------- * - * pg_tdeam.c - * pg_tde access method code + * heapam.c + * heap access method code * * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION - * contrib/pg_tde/pg_tdeam.c + * src/backend/access/heap/heapam.c * * * INTERFACE ROUTINES - * pg_tde_beginscan - begin relation scan - * pg_tde_rescan - restart a relation scan - * pg_tde_endscan - end relation scan - * pg_tde_getnext - retrieve next tuple in scan - * pg_tde_fetch - retrieve tuple with given tid - * pg_tde_insert - insert tuple into a relation - * pg_tde_multi_insert - insert multiple tuples into a relation - * pg_tde_delete - delete a tuple from a relation - * pg_tde_update - replace a tuple in a relation with another tuple + * heap_beginscan - begin relation scan + * heap_rescan - restart a relation scan + * heap_endscan - end relation scan + * heap_getnext - retrieve next tuple in scan + * heap_fetch - retrieve tuple with given tid + * heap_insert - insert tuple into a relation + * heap_multi_insert - insert multiple tuples into a relation + * heap_delete - delete a tuple from a relation + * heap_update - replace a tuple in a relation with another tuple * * NOTES - * This file contains the pg_tde_ routines which implement - * the POSTGRES pg_tde access method used for all POSTGRES + * This file contains the heap_ routines which implement + * the POSTGRES heap access method used for all POSTGRES * relations. 
* *------------------------------------------------------------------------- @@ -34,15 +34,12 @@ #include "postgres.h" -#include "access/pg_tdeam.h" -#include "access/pg_tdeam_xlog.h" -#include "access/pg_tdetoast.h" -#include "access/pg_tde_io.h" -#include "access/pg_tde_visibilitymap.h" -#include "encryption/enc_tuple.h" - #include "access/bufmask.h" #include "access/genam.h" +#include "access/heapam.h" +#include "access/heapam_xlog.h" +#include "access/heaptoast.h" +#include "access/hio.h" #include "access/multixact.h" #include "access/parallel.h" #include "access/relscan.h" @@ -52,6 +49,8 @@ #include "access/tableam.h" #include "access/transam.h" #include "access/valid.h" +#include "access/visibilitymap.h" +#include "encryption/enc_tuple.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" @@ -78,9 +77,9 @@ #include "utils/spccache.h" -static HeapTuple pg_tde_prepare_insert(Relation relation, HeapTuple tup, +static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options); -static XLogRecPtr log_pg_tde_update(Relation reln, Buffer oldbuf, +static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared); @@ -89,7 +88,7 @@ static Bitmapset *HeapDetermineColumnsInfo(Relation relation, Bitmapset *external_cols, HeapTuple oldtup, HeapTuple newtup, bool *has_external); -static bool pg_tde_acquire_tuplock(Relation relation, ItemPointer tid, +static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock); static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, @@ -97,11 +96,11 @@ static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2); -static TM_Result pg_tde_lock_updated_tuple(Relation rel, HeapTuple tuple, +static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode); -static int pg_tde_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, - xl_pg_tde_freeze_plan *plans_out, +static int heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, + xl_heap_freeze_plan *plans_out, OffsetNumber *offsets_out); static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2); @@ -116,7 +115,7 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status uint16 infomask, Relation rel, int *remaining); static void index_delete_sort(TM_IndexDeleteOp *delstate); static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate); -static XLogRecPtr log_pg_tde_new_cid(Relation relation, HeapTuple tup); +static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup); static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, bool *copy); @@ -180,7 +179,7 @@ static const struct #ifdef USE_PREFETCH /* - * pg_tde_index_delete_tuples and index_delete_prefetch_buffer use this + * heap_index_delete_tuples and index_delete_prefetch_buffer use this * structure to coordinate prefetching activity */ typedef struct @@ -192,12 +191,12 @@ typedef struct } IndexDeletePrefetchState; #endif -/* pg_tde_index_delete_tuples bottom-up index deletion costing constants */ +/* heap_index_delete_tuples bottom-up index 
deletion costing constants */ #define BOTTOMUP_MAX_NBLOCKS 6 #define BOTTOMUP_TOLERANCE_NBLOCKS 3 /* - * pg_tde_index_delete_tuples uses this when determining which heap blocks it + * heap_index_delete_tuples uses this when determining which heap blocks it * must visit to help its bottom-up index deletion caller */ typedef struct IndexDeleteCounts @@ -231,7 +230,7 @@ static const int MultiXactStatusLock[MaxMultiXactStatus + 1] = */ /* ---------------- - * initscan - scan code common to pg_tde_beginscan and pg_tde_rescan + * initscan - scan code common to heap_beginscan and heap_rescan * ---------------- */ static void @@ -346,17 +345,17 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) * and for sample scans we update stats for tuple fetches). */ if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN) - pgstat_count_pg_tde_scan(scan->rs_base.rs_rd); + pgstat_count_heap_scan(scan->rs_base.rs_rd); } /* - * pg_tde_setscanlimits - restrict range of a heapscan + * heap_setscanlimits - restrict range of a heapscan * * startBlk is the page to start at * numBlks is number of pages to scan (InvalidBlockNumber means "all") */ void -pg_tde_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks) +heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks) { HeapScanDesc scan = (HeapScanDesc) sscan; @@ -372,14 +371,14 @@ pg_tde_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numB } /* - * pg_tde_getpage - subroutine for pg_tde_gettup() + * heapgetpage - subroutine for heapgettup() * * This routine reads and pins the specified page of the relation. * In page-at-a-time mode it performs additional work, namely determining * which tuples on the page are visible. */ void -pg_tde_getpage(TableScanDesc sscan, BlockNumber block) +heapgetpage(TableScanDesc sscan, BlockNumber block) { HeapScanDesc scan = (HeapScanDesc) sscan; Buffer buffer; @@ -420,7 +419,7 @@ pg_tde_getpage(TableScanDesc sscan, BlockNumber block) /* * Prune and repair fragmentation for the whole page, if possible. */ - pg_tde_page_prune_opt(scan->rs_base.rs_rd, buffer); + heap_page_prune_opt(scan->rs_base.rs_rd, buffer); /* * We must hold share lock on the buffer content while examining tuple @@ -489,14 +488,14 @@ pg_tde_getpage(TableScanDesc sscan, BlockNumber block) } /* - * pg_tde_gettup_initial_block - return the first BlockNumber to scan + * heapgettup_initial_block - return the first BlockNumber to scan * * Returns InvalidBlockNumber when there are no blocks to scan. This can * occur with empty tables and in parallel scans when parallel workers get all * of the pages before we can get a chance to get our first page. */ static BlockNumber -pg_tde_gettup_initial_block(HeapScanDesc scan, ScanDirection dir) +heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir) { Assert(!scan->rs_inited); @@ -537,7 +536,7 @@ pg_tde_gettup_initial_block(HeapScanDesc scan, ScanDirection dir) /* * Start from last page of the scan. Ensure we take into account - * rs_numblocks if it's been adjusted by pg_tde_setscanlimits(). + * rs_numblocks if it's been adjusted by heap_setscanlimits(). 
*/ if (scan->rs_numblocks != InvalidBlockNumber) return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks; @@ -551,7 +550,7 @@ pg_tde_gettup_initial_block(HeapScanDesc scan, ScanDirection dir) /* - * pg_tde_gettup_start_page - helper function for pg_tde_gettup() + * heapgettup_start_page - helper function for heapgettup() * * Return the next page to scan based on the scan->rs_cbuf and set *linesleft * to the number of tuples on this page. Also set *lineoff to the first @@ -559,7 +558,7 @@ pg_tde_gettup_initial_block(HeapScanDesc scan, ScanDirection dir) * getting the final offset on the page. */ static Page -pg_tde_gettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, +heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, OffsetNumber *lineoff) { Page page; @@ -585,14 +584,14 @@ pg_tde_gettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, /* - * pg_tde_gettup_continue_page - helper function for pg_tde_gettup() + * heapgettup_continue_page - helper function for heapgettup() * * Return the next page to scan based on the scan->rs_cbuf and set *linesleft * to the number of tuples left to scan on this page. Also set *lineoff to * the next offset to scan according to the ScanDirection in 'dir'. */ static inline Page -pg_tde_gettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, +heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft, OffsetNumber *lineoff) { Page page; @@ -626,7 +625,7 @@ pg_tde_gettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft } /* - * pg_tde_gettup_advance_block - helper for pg_tde_gettup() and pg_tde_gettup_pagemode() + * heapgettup_advance_block - helper for heapgettup() and heapgettup_pagemode() * * Given the current block number, the scan direction, and various information * contained in the scan descriptor, calculate the BlockNumber to scan next @@ -637,10 +636,10 @@ pg_tde_gettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft * subsequent blocks. * * This also adjusts rs_numblocks when a limit has been imposed by - * pg_tde_setscanlimits(). + * heap_setscanlimits(). */ static inline BlockNumber -pg_tde_gettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir) +heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir) { if (ScanDirectionIsForward(dir)) { @@ -652,17 +651,6 @@ pg_tde_gettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection if (block >= scan->rs_nblocks) block = 0; - /* we're done if we're back at where we started */ - if (block == scan->rs_startblock) - return InvalidBlockNumber; - - /* check if the limit imposed by pg_tde_setscanlimits() is met */ - if (scan->rs_numblocks != InvalidBlockNumber) - { - if (--scan->rs_numblocks == 0) - return InvalidBlockNumber; - } - /* * Report our new scan position for synchronization purposes. We * don't do that when moving backwards, however. 
That would just @@ -678,6 +666,17 @@ pg_tde_gettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection if (scan->rs_base.rs_flags & SO_ALLOW_SYNC) ss_report_location(scan->rs_base.rs_rd, block); + /* we're done if we're back at where we started */ + if (block == scan->rs_startblock) + return InvalidBlockNumber; + + /* check if the limit imposed by heap_setscanlimits() is met */ + if (scan->rs_numblocks != InvalidBlockNumber) + { + if (--scan->rs_numblocks == 0) + return InvalidBlockNumber; + } + return block; } else @@ -693,7 +692,7 @@ pg_tde_gettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection if (block == scan->rs_startblock) return InvalidBlockNumber; - /* check if the limit imposed by pg_tde_setscanlimits() is met */ + /* check if the limit imposed by heap_setscanlimits() is met */ if (scan->rs_numblocks != InvalidBlockNumber) { if (--scan->rs_numblocks == 0) @@ -711,7 +710,7 @@ pg_tde_gettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection } /* ---------------- - * pg_tde_gettup - fetch next heap tuple + * heapgettup - fetch next heap tuple * * Initialize the scan if not already done; then advance to the next * tuple as indicated by "dir"; return the next tuple in scan->rs_ctup, @@ -731,7 +730,7 @@ pg_tde_gettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection * ---------------- */ static void -pg_tde_gettup(HeapScanDesc scan, +heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key) @@ -744,7 +743,7 @@ pg_tde_gettup(HeapScanDesc scan, if (unlikely(!scan->rs_inited)) { - block = pg_tde_gettup_initial_block(scan, dir); + block = heapgettup_initial_block(scan, dir); /* ensure rs_cbuf is invalid when we get InvalidBlockNumber */ Assert(block != InvalidBlockNumber || !BufferIsValid(scan->rs_cbuf)); scan->rs_inited = true; @@ -755,7 +754,7 @@ pg_tde_gettup(HeapScanDesc scan, block = scan->rs_cblock; LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); - page = pg_tde_gettup_continue_page(scan, dir, &linesleft, &lineoff); + page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff); goto continue_page; } @@ -765,9 +764,9 @@ pg_tde_gettup(HeapScanDesc scan, */ while (block != InvalidBlockNumber) { - pg_tde_getpage((TableScanDesc) scan, block); + heapgetpage((TableScanDesc) scan, block); LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); - page = pg_tde_gettup_start_page(scan, dir, &linesleft, &lineoff); + page = heapgettup_start_page(scan, dir, &linesleft, &lineoff); continue_page: /* @@ -819,7 +818,7 @@ pg_tde_gettup(HeapScanDesc scan, LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); /* get the BlockNumber to scan next */ - block = pg_tde_gettup_advance_block(scan, block, dir); + block = heapgettup_advance_block(scan, block, dir); } /* end of scan */ @@ -833,20 +832,20 @@ pg_tde_gettup(HeapScanDesc scan, } /* ---------------- - * pg_tde_gettup_pagemode - fetch next heap tuple in page-at-a-time mode + * heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode * - * Same API as pg_tde_gettup, but used in page-at-a-time mode + * Same API as heapgettup, but used in page-at-a-time mode * - * The internal logic is much the same as pg_tde_gettup's too, but there are some + * The internal logic is much the same as heapgettup's too, but there are some * differences: we do not take the buffer content lock (that only needs to - * happen inside pg_tde_getpage), and we iterate through just the tuples listed + * happen inside heapgetpage), and we iterate through just the tuples listed * in rs_vistuples[] rather than all 
tuples on the page. Notice that * lineindex is 0-based, where the corresponding loop variable lineoff in - * pg_tde_gettup is 1-based. + * heapgettup is 1-based. * ---------------- */ static void -pg_tde_gettup_pagemode(HeapScanDesc scan, +heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key) @@ -859,7 +858,7 @@ pg_tde_gettup_pagemode(HeapScanDesc scan, if (unlikely(!scan->rs_inited)) { - block = pg_tde_gettup_initial_block(scan, dir); + block = heapgettup_initial_block(scan, dir); /* ensure rs_cbuf is invalid when we get InvalidBlockNumber */ Assert(block != InvalidBlockNumber || !BufferIsValid(scan->rs_cbuf)); scan->rs_inited = true; @@ -887,7 +886,7 @@ pg_tde_gettup_pagemode(HeapScanDesc scan, */ while (block != InvalidBlockNumber) { - pg_tde_getpage((TableScanDesc) scan, block); + heapgetpage((TableScanDesc) scan, block); page = BufferGetPage(scan->rs_cbuf); TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page); linesleft = scan->rs_ntuples; @@ -920,7 +919,7 @@ pg_tde_gettup_pagemode(HeapScanDesc scan, } /* get the BlockNumber to scan next */ - block = pg_tde_gettup_advance_block(scan, block, dir); + block = heapgettup_advance_block(scan, block, dir); } /* end of scan */ @@ -940,7 +939,7 @@ pg_tde_gettup_pagemode(HeapScanDesc scan, TableScanDesc -pg_tde_beginscan(Relation relation, Snapshot snapshot, +heap_beginscan(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags) @@ -1011,7 +1010,7 @@ pg_tde_beginscan(Relation relation, Snapshot snapshot, scan->rs_parallelworkerdata = NULL; /* - * we do this here instead of in initscan() because pg_tde_rescan also calls + * we do this here instead of in initscan() because heap_rescan also calls * initscan() and we don't want to allocate memory again */ if (nkeys > 0) @@ -1025,7 +1024,7 @@ pg_tde_beginscan(Relation relation, Snapshot snapshot, } void -pg_tde_rescan(TableScanDesc sscan, ScanKey key, bool set_params, +heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode) { HeapScanDesc scan = (HeapScanDesc) sscan; @@ -1062,7 +1061,7 @@ pg_tde_rescan(TableScanDesc sscan, ScanKey key, bool set_params, } void -pg_tde_endscan(TableScanDesc sscan) +heap_endscan(TableScanDesc sscan) { HeapScanDesc scan = (HeapScanDesc) sscan; @@ -1095,7 +1094,7 @@ pg_tde_endscan(TableScanDesc sscan) } HeapTuple -pg_tde_getnext(TableScanDesc sscan, ScanDirection direction) +heap_getnext(TableScanDesc sscan, ScanDirection direction) { HeapScanDesc scan = (HeapScanDesc) sscan; @@ -1112,22 +1111,22 @@ pg_tde_getnext(TableScanDesc sscan, ScanDirection direction) errmsg_internal("only heap AM is supported"))); /* - * We don't expect direct calls to pg_tde_getnext with valid CheckXidAlive + * We don't expect direct calls to heap_getnext with valid CheckXidAlive * for catalog or regular tables. See detailed comments in xact.c where * these variables are declared. Normally we have such a check at tableam * level API but this is called from many places so we need to ensure it * here. 
*/ if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan)) - elog(ERROR, "unexpected pg_tde_getnext call during logical decoding"); + elog(ERROR, "unexpected heap_getnext call during logical decoding"); /* Note: no locking manipulations needed */ if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE) - pg_tde_gettup_pagemode(scan, direction, + heapgettup_pagemode(scan, direction, scan->rs_base.rs_nkeys, scan->rs_base.rs_key); else - pg_tde_gettup(scan, direction, + heapgettup(scan, direction, scan->rs_base.rs_nkeys, scan->rs_base.rs_key); if (scan->rs_ctup.t_data == NULL) @@ -1138,22 +1137,22 @@ pg_tde_getnext(TableScanDesc sscan, ScanDirection direction) * the proper return buffer and return the tuple. */ - pgstat_count_pg_tde_getnext(scan->rs_base.rs_rd); + pgstat_count_heap_getnext(scan->rs_base.rs_rd); return &scan->rs_ctup; } bool -pg_tde_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot) +heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot) { HeapScanDesc scan = (HeapScanDesc) sscan; /* Note: no locking manipulations needed */ if (sscan->rs_flags & SO_ALLOW_PAGEMODE) - pg_tde_gettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key); + heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key); else - pg_tde_gettup(scan, direction, sscan->rs_nkeys, sscan->rs_key); + heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key); if (scan->rs_ctup.t_data == NULL) { @@ -1166,7 +1165,7 @@ pg_tde_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot * the proper return buffer and return the tuple. */ - pgstat_count_pg_tde_getnext(scan->rs_base.rs_rd); + pgstat_count_heap_getnext(scan->rs_base.rs_rd); PGTdeExecStoreBufferHeapTuple(sscan->rs_rd, &scan->rs_ctup, slot, scan->rs_cbuf); @@ -1174,7 +1173,7 @@ pg_tde_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot } void -pg_tde_set_tidrange(TableScanDesc sscan, ItemPointer mintid, +heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid, ItemPointer maxtid) { HeapScanDesc scan = (HeapScanDesc) sscan; @@ -1220,7 +1219,7 @@ pg_tde_set_tidrange(TableScanDesc sscan, ItemPointer mintid, if (ItemPointerCompare(&highestItem, &lowestItem) < 0) { /* Set an empty range of blocks to scan */ - pg_tde_setscanlimits(sscan, 0, 0); + heap_setscanlimits(sscan, 0, 0); return; } @@ -1239,7 +1238,7 @@ pg_tde_set_tidrange(TableScanDesc sscan, ItemPointer mintid, ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1; /* Set the start block and number of blocks to scan */ - pg_tde_setscanlimits(sscan, startBlk, numBlks); + heap_setscanlimits(sscan, startBlk, numBlks); /* Finally, set the TID range in sscan */ ItemPointerCopy(&lowestItem, &sscan->rs_mintid); @@ -1247,7 +1246,7 @@ pg_tde_set_tidrange(TableScanDesc sscan, ItemPointer mintid, } bool -pg_tde_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, +heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot) { HeapScanDesc scan = (HeapScanDesc) sscan; @@ -1258,9 +1257,9 @@ pg_tde_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, for (;;) { if (sscan->rs_flags & SO_ALLOW_PAGEMODE) - pg_tde_gettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key); + heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key); else - pg_tde_gettup(scan, direction, sscan->rs_nkeys, sscan->rs_key); + heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key); if (scan->rs_ctup.t_data == NULL) { @@ -1269,7 +1268,7 @@ 
pg_tde_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, } /* - * pg_tde_set_tidrange will have used pg_tde_setscanlimits to limit the + * heap_set_tidrange will have used heap_setscanlimits to limit the * range of pages we scan to only ones that can contain the TID range * we're scanning for. Here we must filter out any tuples from these * pages that are outside of that range. @@ -1314,14 +1313,14 @@ pg_tde_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, * if we get here it means we have a new current scan tuple, so point to * the proper return buffer and return the tuple. */ - pgstat_count_pg_tde_getnext(scan->rs_base.rs_rd); + pgstat_count_heap_getnext(scan->rs_base.rs_rd); PGTdeExecStoreBufferHeapTuple(sscan->rs_rd, &scan->rs_ctup, slot, scan->rs_cbuf); return true; } /* - * pg_tde_fetch - retrieve tuple with given tid + * heap_fetch - retrieve tuple with given tid * * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding * the tuple, fill in the remaining fields of *tuple, and check the tuple @@ -1341,7 +1340,7 @@ pg_tde_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, * then tuple->t_data and *userbuf are returned as for the success case, * and again the caller must unpin the buffer; but false is returned. * - * pg_tde_fetch does not follow HOT chains: only the exact TID requested will + * heap_fetch does not follow HOT chains: only the exact TID requested will * be fetched. * * It is somewhat inconsistent that we ereport() on invalid block number but @@ -1354,7 +1353,7 @@ pg_tde_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, * tuple first), but the item number might well not be good. */ bool -pg_tde_fetch(Relation relation, +heap_fetch(Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, @@ -1455,7 +1454,7 @@ pg_tde_fetch(Relation relation, } /* - * pg_tde_hot_search_buffer - search HOT chain for tuple satisfying snapshot + * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot * * On entry, *tid is the TID of a tuple (either a simple tuple, or the root * of a HOT chain), and buffer is the buffer holding this tuple. We search @@ -1471,11 +1470,11 @@ pg_tde_fetch(Relation relation, * globally dead; *all_dead is set true if all members of the HOT chain * are vacuumable, false if not. * - * Unlike pg_tde_fetch, the caller must already have pin and (at least) share + * Unlike heap_fetch, the caller must already have pin and (at least) share * lock on the buffer; it is still pinned/locked at exit. */ bool -pg_tde_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, +heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call) { @@ -1616,7 +1615,7 @@ pg_tde_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, } /* - * pg_tde_get_latest_tid - get the latest tid of a specified tuple + * heap_get_latest_tid - get the latest tid of a specified tuple * * Actually, this gets the latest version that is visible according to the * scan's snapshot. Create a scan using SnapshotDirty to get the very latest, @@ -1627,7 +1626,7 @@ pg_tde_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, * if no version of the row passes the snapshot test. 
*/ void -pg_tde_get_latest_tid(TableScanDesc sscan, +heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid) { Relation relation = sscan->rs_rd; @@ -1781,6 +1780,7 @@ GetBulkInsertState(void) bistate->current_buf = InvalidBuffer; bistate->next_free = InvalidBlockNumber; bistate->last_free = InvalidBlockNumber; + bistate->already_extended_by = 0; return bistate; } @@ -1809,7 +1809,7 @@ ReleaseBulkInsertStatePin(BulkInsertState bistate) /* - * pg_tde_insert - insert tuple into a heap + * heap_insert - insert tuple into a heap * * The new tuple is stamped with current transaction ID and the specified * command ID. @@ -1827,7 +1827,7 @@ ReleaseBulkInsertStatePin(BulkInsertState bistate) * reflected into *tup. */ void -pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, +heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate) { TransactionId xid = GetCurrentTransactionId(); @@ -1846,13 +1846,13 @@ pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, * Note: below this point, heaptup is the data we actually intend to store * into the relation; tup is the caller's original untoasted data. */ - heaptup = pg_tde_prepare_insert(relation, tup, xid, cid, options); + heaptup = heap_prepare_insert(relation, tup, xid, cid, options); /* * Find buffer to insert this tuple into. If the page is all visible, * this will also pin the requisite visibility map page. */ - buffer = pg_tde_RelationGetBufferForTuple(relation, heaptup->t_len, + buffer = RelationGetBufferForTuple(relation, heaptup->t_len, InvalidBuffer, options, bistate, &vmbuffer, NULL, 0); @@ -1877,14 +1877,14 @@ pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, /* NO EREPORT(ERROR) from here till changes are logged */ START_CRIT_SECTION(); - pg_tde_RelationPutHeapTuple(relation, buffer, heaptup, + RelationPutHeapTuple(relation, buffer, heaptup, (options & HEAP_INSERT_SPECULATIVE) != 0); if (PageIsAllVisible(BufferGetPage(buffer))) { all_visible_cleared = true; PageClearAllVisible(BufferGetPage(buffer)); - pg_tde_visibilitymap_clear(relation, + visibilitymap_clear(relation, ItemPointerGetBlockNumber(&(heaptup->t_self)), vmbuffer, VISIBILITYMAP_VALID_BITS); } @@ -1897,7 +1897,7 @@ pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the * aborted tuple will never be pruned until next vacuum is triggered. * - * If you do add PageSetPrunable here, add it in pg_tde_xlog_insert too. + * If you do add PageSetPrunable here, add it in heap_xlog_insert too. */ MarkBufferDirty(buffer); @@ -1905,8 +1905,8 @@ pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, /* XLOG stuff */ if (RelationNeedsWAL(relation)) { - xl_pg_tde_insert xlrec; - xl_pg_tde_header xlhdr; + xl_heap_insert xlrec; + xl_heap_header xlhdr; XLogRecPtr recptr; Page page = BufferGetPage(buffer); uint8 info = XLOG_HEAP_INSERT; @@ -1917,7 +1917,7 @@ pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, * decode, so log that as well. */ if (RelationIsAccessibleInLogicalDecoding(relation)) - log_pg_tde_new_cid(relation, heaptup); + log_heap_new_cid(relation, heaptup); /* * If this is the single and first tuple on page, we can reinit the @@ -1964,7 +1964,7 @@ pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, /* * note we mark xlhdr as belonging to buffer; if XLogInsert decides to * write the whole page to the xlog, we don't need to store - * xl_pg_tde_header in the xlog. + * xl_heap_header in the xlog. 
*/ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags); XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader); @@ -1996,7 +1996,7 @@ pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, CacheInvalidateHeapTuple(relation, heaptup, NULL); /* Note: speculative insertions are counted too, even if aborted later */ - pgstat_count_pg_tde_insert(relation, 1); + pgstat_count_heap_insert(relation, 1); /* * If heaptup is a private copy, release it. Don't forget to copy t_self @@ -2005,18 +2005,18 @@ pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, if (heaptup != tup) { tup->t_self = heaptup->t_self; - pg_tde_freetuple(heaptup); + heap_freetuple(heaptup); } } /* - * Subroutine for pg_tde_insert(). Prepares a tuple for insertion. This sets the + * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the * tuple header fields and toasts the tuple if necessary. Returns a toasted * version of the tuple if it was toasted, or the original tuple if not. Note * that in any case, the header fields are also set in the original tuple. */ static HeapTuple -pg_tde_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, +heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options) { /* @@ -2053,18 +2053,18 @@ pg_tde_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, return tup; } else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD) - return pg_tde_toast_insert_or_update(relation, tup, NULL, options); + return heap_toast_insert_or_update(relation, tup, NULL, options); else return tup; } /* - * Helper for pg_tde_multi_insert() that computes the number of entire pages + * Helper for heap_multi_insert() that computes the number of entire pages * that inserting the remaining heaptuples requires. Used to determine how * much the relation needs to be extended by. */ static int -pg_tde_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace) +heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace) { size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace; int npages = 1; @@ -2085,10 +2085,10 @@ pg_tde_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size sav } /* - * pg_tde_multi_insert - insert multiple tuples into a heap + * heap_multi_insert - insert multiple tuples into a heap * - * This is like pg_tde_insert(), but inserts multiple tuples in one operation. - * That's faster than calling pg_tde_insert() in a loop, because when multiple + * This is like heap_insert(), but inserts multiple tuples in one operation. + * That's faster than calling heap_insert() in a loop, because when multiple * tuples can be inserted on a single page, we can write just a single WAL * record covering all of them, and only need to lock/unlock the page once. * @@ -2096,7 +2096,7 @@ pg_tde_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size sav * temporary context before calling this, if that's a problem. 
*/ void -pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, +heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate) { TransactionId xid = GetCurrentTransactionId(); @@ -2114,7 +2114,7 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, int npages = 0; int npages_used = 0; - /* currently not needed (thus unsupported) for pg_tde_multi_insert() */ + /* currently not needed (thus unsupported) for heap_multi_insert() */ Assert(!(options & HEAP_INSERT_NO_LOGICAL)); needwal = RelationNeedsWAL(relation); @@ -2130,7 +2130,7 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL); slots[i]->tts_tableOid = RelationGetRelid(relation); tuple->t_tableOid = slots[i]->tts_tableOid; - heaptuples[i] = pg_tde_prepare_insert(relation, tuple, xid, cid, + heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid, options); } @@ -2172,7 +2172,7 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, /* * Compute number of pages needed to fit the to-be-inserted tuples in * the worst case. This will be used to determine how much to extend - * the relation by in pg_tde_RelationGetBufferForTuple(), if needed. If we + * the relation by in RelationGetBufferForTuple(), if needed. If we * filled a prior page from scratch, we can just update our last * computation, but if we started with a partially filled page, * recompute from scratch, the number of potentially required pages @@ -2181,7 +2181,7 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, */ if (ndone == 0 || !starting_with_empty_page) { - npages = pg_tde_multi_insert_pages(heaptuples, ndone, ntuples, + npages = heap_multi_insert_pages(heaptuples, ndone, ntuples, saveFreeSpace); npages_used = 0; } @@ -2195,7 +2195,7 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, * Also pin visibility map page if COPY FREEZE inserts tuples into an * empty page. See all_frozen_set below. */ - buffer = pg_tde_RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len, + buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len, InvalidBuffer, options, bistate, &vmbuffer, NULL, npages - npages_used); @@ -2210,17 +2210,17 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, START_CRIT_SECTION(); /* - * pg_tde_RelationGetBufferForTuple has ensured that the first tuple fits. + * RelationGetBufferForTuple has ensured that the first tuple fits. * Put that on the page, and then as many other tuples as fit. */ - pg_tde_RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false); + RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false); /* * For logical decoding we need combo CIDs to properly decode the * catalog. */ if (needwal && need_cids) - log_pg_tde_new_cid(relation, heaptuples[ndone]); + log_heap_new_cid(relation, heaptuples[ndone]); for (nthispage = 1; ndone + nthispage < ntuples; nthispage++) { @@ -2229,14 +2229,14 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace) break; - pg_tde_RelationPutHeapTuple(relation, buffer, heaptup, false); + RelationPutHeapTuple(relation, buffer, heaptup, false); /* * For logical decoding we need combo CIDs to properly decode the * catalog. 
*/ if (needwal && need_cids) - log_pg_tde_new_cid(relation, heaptup); + log_heap_new_cid(relation, heaptup); } /* @@ -2250,7 +2250,7 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, { all_visible_cleared = true; PageClearAllVisible(page); - pg_tde_visibilitymap_clear(relation, + visibilitymap_clear(relation, BufferGetBlockNumber(buffer), vmbuffer, VISIBILITYMAP_VALID_BITS); } @@ -2258,7 +2258,7 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, PageSetAllVisible(page); /* - * XXX Should we set PageSetPrunable on this page ? See pg_tde_insert() + * XXX Should we set PageSetPrunable on this page ? See heap_insert() */ MarkBufferDirty(buffer); @@ -2267,7 +2267,7 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, if (needwal) { XLogRecPtr recptr; - xl_pg_tde_multi_insert *xlrec; + xl_heap_multi_insert *xlrec; uint8 info = XLOG_HEAP2_MULTI_INSERT; char *tupledata; int totaldatalen; @@ -2281,8 +2281,8 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, */ init = starting_with_empty_page; - /* allocate xl_pg_tde_multi_insert struct from the scratch area */ - xlrec = (xl_pg_tde_multi_insert *) scratchptr; + /* allocate xl_heap_multi_insert struct from the scratch area */ + xlrec = (xl_heap_multi_insert *) scratchptr; scratchptr += SizeOfHeapMultiInsert; /* @@ -2343,8 +2343,8 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE; /* - * Signal that this is the last xl_pg_tde_multi_insert record - * emitted by this call to pg_tde_multi_insert(). Needed for logical + * Signal that this is the last xl_heap_multi_insert record + * emitted by this call to heap_multi_insert(). Needed for logical * decoding so it knows when to cleanup temporary data. */ if (ndone + nthispage == ntuples) @@ -2386,14 +2386,14 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, if (all_frozen_set) { Assert(PageIsAllVisible(page)); - Assert(pg_tde_visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer)); + Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer)); /* * It's fine to use InvalidTransactionId here - this is only used * when HEAP_INSERT_FROZEN is specified, which intentionally * violates visibility rules. */ - pg_tde_visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer, + visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer, InvalidXLogRecPtr, vmbuffer, InvalidTransactionId, VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN); @@ -2445,28 +2445,28 @@ pg_tde_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, for (i = 0; i < ntuples; i++) slots[i]->tts_tid = heaptuples[i]->t_self; - pgstat_count_pg_tde_insert(relation, ntuples); + pgstat_count_heap_insert(relation, ntuples); } /* - * simple_pg_tde_insert - insert a tuple + * simple_heap_insert - insert a tuple * - * Currently, this routine differs from pg_tde_insert only in supplying + * Currently, this routine differs from heap_insert only in supplying * a default command ID and not allowing access to the speedup options. * - * This should be used rather than using pg_tde_insert directly in most places + * This should be used rather than using heap_insert directly in most places * where we are modifying system catalogs. 
*/ void -simple_pg_tde_insert(Relation relation, HeapTuple tup) +simple_heap_insert(Relation relation, HeapTuple tup) { - pg_tde_insert(relation, tup, GetCurrentCommandId(true), 0, NULL); + heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL); } /* * Given infomask/infomask2, compute the bits that must be saved in the - * "infobits" field of xl_pg_tde_delete, xl_pg_tde_update, xl_pg_tde_lock, - * xl_pg_tde_lock_updated WAL records. + * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock, + * xl_heap_lock_updated WAL records. * * See fix_infomask_from_infobits. */ @@ -2505,7 +2505,7 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) } /* - * pg_tde_delete - delete a tuple + * heap_delete - delete a tuple * * See table_tuple_delete() for an explanation of the parameters, except that * this routine directly takes a tuple rather than a slot. @@ -2516,7 +2516,7 @@ xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) * generated by another transaction). */ TM_Result -pg_tde_delete(Relation relation, ItemPointer tid, +heap_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart) { @@ -2560,7 +2560,7 @@ pg_tde_delete(Relation relation, ItemPointer tid, * the lock. */ if (PageIsAllVisible(page)) - pg_tde_visibilitymap_pin(relation, block, &vmbuffer); + visibilitymap_pin(relation, block, &vmbuffer); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); @@ -2583,7 +2583,7 @@ pg_tde_delete(Relation relation, ItemPointer tid, if (vmbuffer == InvalidBuffer && PageIsAllVisible(page)) { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); - pg_tde_visibilitymap_pin(relation, block, &vmbuffer); + visibilitymap_pin(relation, block, &vmbuffer); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); } @@ -2611,7 +2611,7 @@ pg_tde_delete(Relation relation, ItemPointer tid, * which lock mode the locker has, because we need the strongest one. * * Before sleeping, we need to acquire tuple lock to establish our - * priority for the tuple (see pg_tde_lock_tuple). LockTuple will + * priority for the tuple (see heap_lock_tuple). LockTuple will * release us when we are next-in-line for the tuple. * * If we are forced to "start over" below, we keep the tuple lock; @@ -2632,7 +2632,7 @@ pg_tde_delete(Relation relation, ItemPointer tid, * requesting a lock and already have one; avoids deadlock). */ if (!current_is_member) - pg_tde_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive, + heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive, LockWaitBlock, &have_tuple_lock); /* wait for multixact */ @@ -2673,7 +2673,7 @@ pg_tde_delete(Relation relation, ItemPointer tid, * lock. 
*/ LockBuffer(buffer, BUFFER_LOCK_UNLOCK); - pg_tde_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive, + heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive, LockWaitBlock, &have_tuple_lock); XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); @@ -2790,7 +2790,7 @@ pg_tde_delete(Relation relation, ItemPointer tid, { all_visible_cleared = true; PageClearAllVisible(page); - pg_tde_visibilitymap_clear(relation, BufferGetBlockNumber(buffer), + visibilitymap_clear(relation, BufferGetBlockNumber(buffer), vmbuffer, VISIBILITYMAP_VALID_BITS); } @@ -2814,13 +2814,13 @@ pg_tde_delete(Relation relation, ItemPointer tid, /* * XLOG stuff * - * NB: pg_tde_abort_speculative() uses the same xlog record and replay + * NB: heap_abort_speculative() uses the same xlog record and replay * routines. */ if (RelationNeedsWAL(relation)) { - xl_pg_tde_delete xlrec; - xl_pg_tde_header xlhdr; + xl_heap_delete xlrec; + xl_heap_header xlhdr; XLogRecPtr recptr; /* @@ -2828,7 +2828,7 @@ pg_tde_delete(Relation relation, ItemPointer tid, * catalog */ if (RelationIsAccessibleInLogicalDecoding(relation)) - log_pg_tde_new_cid(relation, &tp); + log_heap_new_cid(relation, &tp); xlrec.flags = 0; if (all_visible_cleared) @@ -2897,7 +2897,7 @@ pg_tde_delete(Relation relation, ItemPointer tid, Assert(!HeapTupleHasExternal(&tp)); } else if (HeapTupleHasExternal(&tp)) - pg_tde_toast_delete(relation, &tp, false); + heap_toast_delete(relation, &tp, false); /* * Mark tuple for invalidation from system caches at next command @@ -2915,16 +2915,16 @@ pg_tde_delete(Relation relation, ItemPointer tid, if (have_tuple_lock) UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive); - pgstat_count_pg_tde_delete(relation); + pgstat_count_heap_delete(relation); if (old_key_tuple != NULL && old_key_copied) - pg_tde_freetuple(old_key_tuple); + heap_freetuple(old_key_tuple); return TM_Ok; } /* - * simple_pg_tde_delete - delete a tuple + * simple_heap_delete - delete a tuple * * This routine may be used to delete a tuple when concurrent updates of * the target tuple are not expected (for example, because we have a lock @@ -2932,12 +2932,12 @@ pg_tde_delete(Relation relation, ItemPointer tid, * via ereport(). */ void -simple_pg_tde_delete(Relation relation, ItemPointer tid) +simple_heap_delete(Relation relation, ItemPointer tid) { TM_Result result; TM_FailureData tmfd; - result = pg_tde_delete(relation, tid, + result = heap_delete(relation, tid, GetCurrentCommandId(true), InvalidSnapshot, true /* wait for commit */ , &tmfd, false /* changingPart */ ); @@ -2961,13 +2961,13 @@ simple_pg_tde_delete(Relation relation, ItemPointer tid) break; default: - elog(ERROR, "unrecognized pg_tde_delete status: %u", result); + elog(ERROR, "unrecognized heap_delete status: %u", result); break; } } /* - * pg_tde_update - replace a tuple + * heap_update - replace a tuple * * See table_tuple_update() for an explanation of the parameters, except that * this routine directly takes a tuple rather than a slot. @@ -2978,7 +2978,7 @@ simple_pg_tde_delete(Relation relation, ItemPointer tid) * generated by another transaction). 
*/ TM_Result -pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, +heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes) @@ -3079,7 +3079,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * the lock. */ if (PageIsAllVisible(page)) - pg_tde_visibilitymap_pin(relation, block, &vmbuffer); + visibilitymap_pin(relation, block, &vmbuffer); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); @@ -3183,7 +3183,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * specially. * * This may cause failures with third-party code that calls - * pg_tde_update directly. + * heap_update directly. */ /* must copy state data before unlocking buffer */ @@ -3202,7 +3202,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * it is preserved). * * Before sleeping, we need to acquire tuple lock to establish our - * priority for the tuple (see pg_tde_lock_tuple). LockTuple will + * priority for the tuple (see heap_lock_tuple). LockTuple will * release us when we are next-in-line for the tuple. Note we must * not acquire the tuple lock until we're sure we're going to sleep; * otherwise we're open for race conditions with other transactions @@ -3228,7 +3228,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * requesting a lock and already have one; avoids deadlock). */ if (!current_is_member) - pg_tde_acquire_tuplock(relation, &(oldtup.t_self), *lockmode, + heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode, LockWaitBlock, &have_tuple_lock); /* wait for multixact */ @@ -3313,7 +3313,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * lock. */ LockBuffer(buffer, BUFFER_LOCK_UNLOCK); - pg_tde_acquire_tuplock(relation, &(oldtup.t_self), *lockmode, + heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode, LockWaitBlock, &have_tuple_lock); XactLockTableWait(xwait, relation, &oldtup.t_self, XLTW_Update); @@ -3397,7 +3397,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, if (vmbuffer == InvalidBuffer && PageIsAllVisible(page)) { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); - pg_tde_visibilitymap_pin(relation, block, &vmbuffer); + visibilitymap_pin(relation, block, &vmbuffer); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); goto l2; } @@ -3513,7 +3513,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * * To satisfy the rule that any xid potentially appearing in a buffer * written out to disk, we unfortunately have to WAL log this - * temporary modification. We can reuse xl_pg_tde_lock for this + * temporary modification. We can reuse xl_heap_lock for this * purpose. If we crash/error before following through with the * actual update, xmax will be of an aborted transaction, allowing * other sessions to proceed. @@ -3557,7 +3557,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * worthwhile. 
*/ if (PageIsAllVisible(page) && - pg_tde_visibilitymap_clear(relation, block, vmbuffer, + visibilitymap_clear(relation, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN)) cleared_all_frozen = true; @@ -3565,7 +3565,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, if (RelationNeedsWAL(relation)) { - xl_pg_tde_lock xlrec; + xl_heap_lock xlrec; XLogRecPtr recptr; XLogBeginInsert(); @@ -3596,7 +3596,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, if (need_toast) { /* Note we always use WAL and FSM during updates */ - heaptup = pg_tde_toast_insert_or_update(relation, newtup, &oldtup, 0); + heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0); newtupsize = MAXALIGN(heaptup->t_len); } else @@ -3615,13 +3615,13 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * some other backend trying to get the same two locks in the other * order, we must be consistent about the order we get the locks in. * We use the rule "lock the lower-numbered page of the relation - * first". To implement this, we must do pg_tde_RelationGetBufferForTuple + * first". To implement this, we must do RelationGetBufferForTuple * while not holding the lock on the old page, and we must rely on it * to get the locks on both pages in the correct order. * * Another consideration is that we need visibility map page pin(s) if * we will have to clear the all-visible flag on either page. If we - * call pg_tde_RelationGetBufferForTuple, we rely on it to acquire any such + * call RelationGetBufferForTuple, we rely on it to acquire any such * pins; but if we don't, we have to handle that here. Hence we need * a loop. */ @@ -3629,8 +3629,8 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, { if (newtupsize > pagefree) { - /* It doesn't fit, must use pg_tde_RelationGetBufferForTuple. */ - newbuf = pg_tde_RelationGetBufferForTuple(relation, heaptup->t_len, + /* It doesn't fit, must use RelationGetBufferForTuple. */ + newbuf = RelationGetBufferForTuple(relation, heaptup->t_len, buffer, 0, NULL, &vmbuffer_new, &vmbuffer, 0); @@ -3639,7 +3639,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, } /* Acquire VM page pin if needed and we don't have it. */ if (vmbuffer == InvalidBuffer && PageIsAllVisible(page)) - pg_tde_visibilitymap_pin(relation, block, &vmbuffer); + visibilitymap_pin(relation, block, &vmbuffer); /* Re-acquire the lock on the old tuple's page. */ LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); /* Re-check using the up-to-date free space */ @@ -3744,7 +3744,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * * XXX Should we set hint on newbuf as well? If the transaction aborts, * there would be a prunable tuple in the newbuf; but for now we choose - * not to optimize for aborts. Note that pg_tde_xlog_update must be kept in + * not to optimize for aborts. Note that heap_xlog_update must be kept in * sync if this decision changes. */ PageSetPrunable(page, xid); @@ -3766,7 +3766,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, HeapTupleClearHeapOnly(newtup); } - pg_tde_RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */ + RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */ /* Clear obsolete visibility flags, possibly set by ourselves above... 
*/ @@ -3787,14 +3787,14 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, { all_visible_cleared = true; PageClearAllVisible(BufferGetPage(buffer)); - pg_tde_visibilitymap_clear(relation, BufferGetBlockNumber(buffer), + visibilitymap_clear(relation, BufferGetBlockNumber(buffer), vmbuffer, VISIBILITYMAP_VALID_BITS); } if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf))) { all_visible_cleared_new = true; PageClearAllVisible(BufferGetPage(newbuf)); - pg_tde_visibilitymap_clear(relation, BufferGetBlockNumber(newbuf), + visibilitymap_clear(relation, BufferGetBlockNumber(newbuf), vmbuffer_new, VISIBILITYMAP_VALID_BITS); } @@ -3813,11 +3813,11 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, */ if (RelationIsAccessibleInLogicalDecoding(relation)) { - log_pg_tde_new_cid(relation, &oldtup); - log_pg_tde_new_cid(relation, heaptup); + log_heap_new_cid(relation, &oldtup); + log_heap_new_cid(relation, heaptup); } - recptr = log_pg_tde_update(relation, buffer, + recptr = log_heap_update(relation, buffer, newbuf, &oldtup, heaptup, old_key_tuple, all_visible_cleared, @@ -3860,7 +3860,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, if (have_tuple_lock) UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode); - pgstat_count_pg_tde_update(relation, use_hot_update, newbuf != buffer); + pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer); /* * If heaptup is a private copy, release it. Don't forget to copy t_self @@ -3869,7 +3869,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, if (heaptup != newtup) { newtup->t_self = heaptup->t_self; - pg_tde_freetuple(heaptup); + heap_freetuple(heaptup); } /* @@ -3889,7 +3889,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, *update_indexes = TU_All; if (old_key_tuple != NULL && old_key_copied) - pg_tde_freetuple(old_key_tuple); + heap_freetuple(old_key_tuple); bms_free(hot_attrs); bms_free(sum_attrs); @@ -3906,7 +3906,7 @@ pg_tde_update(Relation relation, ItemPointer otid, HeapTuple newtup, * HeapDetermineColumnsInfo. */ static bool -pg_tde_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2, +heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2, bool isnull1, bool isnull2) { Form_pg_attribute att; @@ -4005,13 +4005,13 @@ HeapDetermineColumnsInfo(Relation relation, /* * Extract the corresponding values. XXX this is pretty inefficient * if there are many indexed columns. Should we do a single - * pg_tde_deform_tuple call on each tuple, instead? But that doesn't + * heap_deform_tuple call on each tuple, instead? But that doesn't * work for system columns ... */ - value1 = pg_tde_getattr(oldtup, attrnum, tupdesc, &isnull1); - value2 = pg_tde_getattr(newtup, attrnum, tupdesc, &isnull2); + value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1); + value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2); - if (!pg_tde_attr_equals(tupdesc, attrnum, value1, + if (!heap_attr_equals(tupdesc, attrnum, value1, value2, isnull1, isnull2)) { modified = bms_add_member(modified, attidx); @@ -4039,7 +4039,7 @@ HeapDetermineColumnsInfo(Relation relation, } /* - * simple_pg_tde_update - replace a tuple + * simple_heap_update - replace a tuple * * This routine may be used to update a tuple when concurrent updates of * the target tuple are not expected (for example, because we have a lock @@ -4047,14 +4047,14 @@ HeapDetermineColumnsInfo(Relation relation, * via ereport(). 
*/ void -simple_pg_tde_update(Relation relation, ItemPointer otid, HeapTuple tup, +simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup, TU_UpdateIndexes *update_indexes) { TM_Result result; TM_FailureData tmfd; LockTupleMode lockmode; - result = pg_tde_update(relation, otid, tup, + result = heap_update(relation, otid, tup, GetCurrentCommandId(true), InvalidSnapshot, true /* wait for commit */ , &tmfd, &lockmode, update_indexes); @@ -4078,7 +4078,7 @@ simple_pg_tde_update(Relation relation, ItemPointer otid, HeapTuple tup, break; default: - elog(ERROR, "unrecognized pg_tde_update status: %u", result); + elog(ERROR, "unrecognized heap_update status: %u", result); break; } } @@ -4105,7 +4105,7 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update) } /* - * pg_tde_lock_tuple - lock a tuple in shared or exclusive mode + * heap_lock_tuple - lock a tuple in shared or exclusive mode * * Note that this acquires a buffer pin, which the caller must release. * @@ -4136,7 +4136,7 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update) * See README.tuplock for a thorough explanation of this mechanism. */ TM_Result -pg_tde_lock_tuple(Relation relation, HeapTuple tuple, +heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd) @@ -4167,7 +4167,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, * the lock. */ if (PageIsAllVisible(BufferGetPage(*buffer))) - pg_tde_visibilitymap_pin(relation, block, &vmbuffer); + visibilitymap_pin(relation, block, &vmbuffer); LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE); @@ -4354,7 +4354,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, { TM_Result res; - res = pg_tde_lock_updated_tuple(relation, tuple, &t_ctid, + res = heap_lock_updated_tuple(relation, tuple, &t_ctid, GetCurrentTransactionId(), mode); if (res != TM_Ok) @@ -4507,7 +4507,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, * rechecking tuple state. */ if (!skip_tuple_lock && - !pg_tde_acquire_tuplock(relation, tid, mode, wait_policy, + !heap_acquire_tuplock(relation, tid, mode, wait_policy, &have_tuple_lock)) { /* @@ -4526,7 +4526,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, /* We only ever lock tuples, never update them */ if (status >= MultiXactStatusNoKeyUpdate) - elog(ERROR, "invalid lock mode in pg_tde_lock_tuple"); + elog(ERROR, "invalid lock mode in heap_lock_tuple"); /* wait for multixact to end, or die trying */ switch (wait_policy) @@ -4601,7 +4601,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, { TM_Result res; - res = pg_tde_lock_updated_tuple(relation, tuple, &t_ctid, + res = heap_lock_updated_tuple(relation, tuple, &t_ctid, GetCurrentTransactionId(), mode); if (res != TM_Ok) @@ -4668,7 +4668,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, * TM_WouldBlock above, it's possible for concurrent transactions to * release the lock and set HEAP_XMAX_INVALID in the meantime. So * this assert is slightly different from the equivalent one in - * pg_tde_delete and pg_tde_update. + * heap_delete and heap_update. 
*/ Assert((result == TM_WouldBlock) || !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID)); @@ -4695,7 +4695,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, if (vmbuffer == InvalidBuffer && PageIsAllVisible(page)) { LockBuffer(*buffer, BUFFER_LOCK_UNLOCK); - pg_tde_visibilitymap_pin(relation, block, &vmbuffer); + visibilitymap_pin(relation, block, &vmbuffer); LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE); goto l3; } @@ -4754,7 +4754,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, /* Clear only the all-frozen bit on visibility map if needed */ if (PageIsAllVisible(page) && - pg_tde_visibilitymap_clear(relation, block, vmbuffer, + visibilitymap_clear(relation, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN)) cleared_all_frozen = true; @@ -4775,7 +4775,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, */ if (RelationNeedsWAL(relation)) { - xl_pg_tde_lock xlrec; + xl_heap_lock xlrec; XLogRecPtr recptr; XLogBeginInsert(); @@ -4834,7 +4834,7 @@ pg_tde_lock_tuple(Relation relation, HeapTuple tuple, * wait_policy is Skip. */ static bool -pg_tde_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, +heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock) { if (*have_tuple_lock) @@ -5149,7 +5149,7 @@ compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, } /* - * Subroutine for pg_tde_lock_updated_tuple_rec. + * Subroutine for heap_lock_updated_tuple_rec. * * Given a hypothetical multixact status held by the transaction identified * with the given xid, does the current transaction need to wait, fail, or can @@ -5248,14 +5248,14 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, /* - * Recursive part of pg_tde_lock_updated_tuple + * Recursive part of heap_lock_updated_tuple * * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given * xid with the given mode; if this tuple is updated, recurse to lock the new * version as well. 
*/ static TM_Result -pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, +heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode) { TM_Result result; @@ -5283,7 +5283,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, block = ItemPointerGetBlockNumber(&tupid); ItemPointerCopy(&tupid, &(mytup.t_self)); - if (!pg_tde_fetch(rel, SnapshotAny, &mytup, &buf, false)) + if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false)) { /* * if we fail to find the updated version of the tuple, it's @@ -5307,7 +5307,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, */ if (PageIsAllVisible(BufferGetPage(buf))) { - pg_tde_visibilitymap_pin(rel, block, &vmbuffer); + visibilitymap_pin(rel, block, &vmbuffer); pinned_desired_page = true; } else @@ -5329,7 +5329,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf))) { LockBuffer(buf, BUFFER_LOCK_UNLOCK); - pg_tde_visibilitymap_pin(rel, block, &vmbuffer); + visibilitymap_pin(rel, block, &vmbuffer); LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); } @@ -5401,7 +5401,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, /* * If the tuple was already locked by ourselves in a - * previous iteration of this (say pg_tde_lock_tuple was + * previous iteration of this (say heap_lock_tuple was * forced to restart the locking loop because of a change * in xmax), then we hold the lock already on this tuple * version and we don't need to do anything; and this is @@ -5478,7 +5478,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, /* * If the tuple was already locked by ourselves in a previous - * iteration of this (say pg_tde_lock_tuple was forced to + * iteration of this (say heap_lock_tuple was forced to * restart the locking loop because of a change in xmax), then * we hold the lock already on this tuple version and we don't * need to do anything; and this is not an error condition @@ -5508,7 +5508,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, &new_xmax, &new_infomask, &new_infomask2); if (PageIsAllVisible(BufferGetPage(buf)) && - pg_tde_visibilitymap_clear(rel, block, vmbuffer, + visibilitymap_clear(rel, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN)) cleared_all_frozen = true; @@ -5526,7 +5526,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, /* XLOG stuff */ if (RelationNeedsWAL(rel)) { - xl_pg_tde_lock_updated xlrec; + xl_heap_lock_updated xlrec; XLogRecPtr recptr; Page page = BufferGetPage(buf); @@ -5578,7 +5578,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, } /* - * pg_tde_lock_updated_tuple + * heap_lock_updated_tuple * Follow update chain when locking an updated tuple, acquiring locks (row * marks) on the updated versions. * @@ -5591,7 +5591,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, * * Note that we don't acquire heavyweight tuple locks on the tuples we walk * when we have to wait for other transactions to release them, as opposed to - * what pg_tde_lock_tuple does. The reason is that having more than one + * what heap_lock_tuple does. 
The reason is that having more than one * transaction walking the chain is probably uncommon enough that risk of * starvation is not likely: one of the preconditions for being here is that * the snapshot in use predates the update that created this tuple (because we @@ -5600,7 +5600,7 @@ pg_tde_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid, * levels, because that would lead to a serializability failure. */ static TM_Result -pg_tde_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, +heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode) { /* @@ -5621,7 +5621,7 @@ pg_tde_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, */ MultiXactIdSetOldestMember(); - return pg_tde_lock_updated_tuple_rec(rel, ctid, xid, mode); + return heap_lock_updated_tuple_rec(rel, ctid, xid, mode); } /* nothing to lock */ @@ -5629,7 +5629,7 @@ pg_tde_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, } /* - * pg_tde_finish_speculative - mark speculative insertion as successful + * heap_finish_speculative - mark speculative insertion as successful * * To successfully finish a speculative insertion we have to clear speculative * token from tuple. To do so the t_ctid field, which will contain a @@ -5645,7 +5645,7 @@ pg_tde_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, * An explicit confirmation WAL record also makes logical decoding simpler. */ void -pg_tde_finish_speculative(Relation relation, ItemPointer tid) +heap_finish_speculative(Relation relation, ItemPointer tid) { Buffer buffer; Page page; @@ -5682,7 +5682,7 @@ pg_tde_finish_speculative(Relation relation, ItemPointer tid) /* XLOG stuff */ if (RelationNeedsWAL(relation)) { - xl_pg_tde_confirm xlrec; + xl_heap_confirm xlrec; XLogRecPtr recptr; xlrec.offnum = ItemPointerGetOffsetNumber(tid); @@ -5706,7 +5706,7 @@ pg_tde_finish_speculative(Relation relation, ItemPointer tid) } /* - * pg_tde_abort_speculative - kill a speculatively inserted tuple + * heap_abort_speculative - kill a speculatively inserted tuple * * Marks a tuple that was speculatively inserted in the same command as dead, * by setting its xmin as invalid. That makes it immediately appear as dead @@ -5724,7 +5724,7 @@ pg_tde_finish_speculative(Relation relation, ItemPointer tid) * inserters did not take this precaution, then under high concurrency they * could deadlock with each other, which would not be acceptable. * - * This is somewhat redundant with pg_tde_delete, but we prefer to have a + * This is somewhat redundant with heap_delete, but we prefer to have a * dedicated routine with stripped down requirements. Note that this is also * used to delete the TOAST tuples created during speculative insertion. * @@ -5732,7 +5732,7 @@ pg_tde_finish_speculative(Relation relation, ItemPointer tid) * confirmation records. */ void -pg_tde_abort_speculative(Relation relation, ItemPointer tid) +heap_abort_speculative(Relation relation, ItemPointer tid) { TransactionId xid = GetCurrentTransactionId(); ItemId lp; @@ -5818,12 +5818,12 @@ pg_tde_abort_speculative(Relation relation, ItemPointer tid) /* * XLOG stuff * - * The WAL records generated here match pg_tde_delete(). The same recovery + * The WAL records generated here match heap_delete(). The same recovery * routines are used. 
*/ if (RelationNeedsWAL(relation)) { - xl_pg_tde_delete xlrec; + xl_heap_delete xlrec; XLogRecPtr recptr; xlrec.flags = XLH_DELETE_IS_SUPER; @@ -5850,7 +5850,7 @@ pg_tde_abort_speculative(Relation relation, ItemPointer tid) if (HeapTupleHasExternal(&tp)) { Assert(!IsToastRelation(relation)); - pg_tde_toast_delete(relation, &tp, true); + heap_toast_delete(relation, &tp, true); } /* @@ -5862,11 +5862,11 @@ pg_tde_abort_speculative(Relation relation, ItemPointer tid) ReleaseBuffer(buffer); /* count deletion, as we counted the insertion too */ - pgstat_count_pg_tde_delete(relation); + pgstat_count_heap_delete(relation); } /* - * pg_tde_inplace_update - update a tuple "in place" (ie, overwrite it) + * heap_inplace_update - update a tuple "in place" (ie, overwrite it) * * Overwriting violates both MVCC and transactional safety, so the uses * of this function in Postgres are extremely limited. Nonetheless we @@ -5885,7 +5885,7 @@ pg_tde_abort_speculative(Relation relation, ItemPointer tid) * include toast values that have been expanded, causing a failure here. */ void -pg_tde_inplace_update(Relation relation, HeapTuple tuple) +heap_inplace_update(Relation relation, HeapTuple tuple) { Buffer buffer; Page page; @@ -5918,7 +5918,6 @@ pg_tde_inplace_update(Relation relation, HeapTuple tuple) elog(ERROR, "invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); - // encryption / decryption here: HOW? oldlen = ItemIdGetLength(lp) - htup->t_hoff; newlen = tuple->t_len - tuple->t_data->t_hoff; @@ -5937,7 +5936,7 @@ pg_tde_inplace_update(Relation relation, HeapTuple tuple) /* XLOG stuff */ if (RelationNeedsWAL(relation)) { - xl_pg_tde_inplace xlrec; + xl_heap_inplace xlrec; XLogRecPtr recptr; xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self); @@ -6015,9 +6014,9 @@ pg_tde_inplace_update(Relation relation, HeapTuple tuple) * its own special risks. * * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers - * using pg_tde_tuple_should_freeze when we haven't forced page-level freezing. + * using heap_tuple_should_freeze when we haven't forced page-level freezing. * - * NB: Caller should avoid needlessly calling pg_tde_tuple_should_freeze when we + * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we * have already forced page-level freezing, since that might incur the same * SLRU buffer misses that we specifically intended to avoid by freezing. */ @@ -6329,7 +6328,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, } /* - * pg_tde_prepare_freeze_tuple + * heap_prepare_freeze_tuple * * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) * are older than the OldestXmin and/or OldestMxact freeze cutoffs. If so, @@ -6343,7 +6342,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * XIDs or MultiXactIds that will need to be processed by a future VACUUM. * * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every - * tuple that we returned true for, and call pg_tde_freeze_execute_prepared to + * tuple that we returned true for, and call heap_freeze_execute_prepared to * execute freezing. Caller must initialize pagefrz fields for page as a * whole before first call here for each heap page. * @@ -6368,11 +6367,11 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * * NB: This function has side effects: it might allocate a new MultiXactId. * It will be set as tuple's new xmax when our *frz output is processed within - * pg_tde_execute_freeze_tuple later on. 
If the tuple is in a shared buffer + * heap_execute_freeze_tuple later on. If the tuple is in a shared buffer * then caller had better have an exclusive lock on it already. */ bool -pg_tde_prepare_freeze_tuple(HeapTupleHeader tuple, +heap_prepare_freeze_tuple(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, HeapPageFreeze *pagefrz, HeapTupleFreeze *frz, bool *totally_frozen) @@ -6467,7 +6466,7 @@ pg_tde_prepare_freeze_tuple(HeapTupleHeader tuple, * * FreezeMultiXactId is _not_ responsible for the "no freeze" * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our - * job. A call to pg_tde_tuple_should_freeze for this same tuple + * job. A call to heap_tuple_should_freeze for this same tuple * will take place below if 'freeze_required' isn't set already. * (This repeats work from FreezeMultiXactId, but allows "no * freeze" tracker maintenance to happen in only one place.) @@ -6627,7 +6626,7 @@ pg_tde_prepare_freeze_tuple(HeapTupleHeader tuple, * Does this tuple force caller to freeze the entire page? */ pagefrz->freeze_required = - pg_tde_tuple_should_freeze(tuple, cutoffs, + heap_tuple_should_freeze(tuple, cutoffs, &pagefrz->NoFreezePageRelfrozenXid, &pagefrz->NoFreezePageRelminMxid); } @@ -6637,7 +6636,7 @@ pg_tde_prepare_freeze_tuple(HeapTupleHeader tuple, } /* - * pg_tde_execute_freeze_tuple + * heap_execute_freeze_tuple * Execute the prepared freezing of a tuple with caller's freeze plan. * * Caller is responsible for ensuring that no other backend can access the @@ -6646,7 +6645,7 @@ pg_tde_prepare_freeze_tuple(HeapTupleHeader tuple, * in private storage (which is what CLUSTER and friends do). */ static inline void -pg_tde_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz) +heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz) { HeapTupleHeaderSetXmax(tuple, frz->xmax); @@ -6661,10 +6660,10 @@ pg_tde_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz) } /* - * pg_tde_freeze_execute_prepared + * heap_freeze_execute_prepared * * Executes freezing of one or more heap tuples on a page on behalf of caller. - * Caller passes an array of tuple plans from pg_tde_prepare_freeze_tuple. + * Caller passes an array of tuple plans from heap_prepare_freeze_tuple. * Caller must set 'offset' in each plan for us. Note that we destructively * sort caller's tuples array in-place, so caller had better be done with it. * @@ -6675,7 +6674,7 @@ pg_tde_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz) * See section on buffer access rules in src/backend/storage/buffer/README. */ void -pg_tde_freeze_execute_prepared(Relation rel, Buffer buffer, +heap_freeze_execute_prepared(Relation rel, Buffer buffer, TransactionId snapshotConflictHorizon, HeapTupleFreeze *tuples, int ntuples) { @@ -6686,7 +6685,7 @@ pg_tde_freeze_execute_prepared(Relation rel, Buffer buffer, /* * Perform xmin/xmax XID status sanity checks before critical section. * - * pg_tde_prepare_freeze_tuple doesn't perform these checks directly because + * heap_prepare_freeze_tuple doesn't perform these checks directly because * pg_xact lookups are relatively expensive. They shouldn't be repeated * by successive VACUUMs that each decide against freezing the same page. */ @@ -6701,7 +6700,6 @@ pg_tde_freeze_execute_prepared(Relation rel, Buffer buffer, /* Deliberately avoid relying on tuple hint bits here */ if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED) { - // TODO: how to keep compiling both? 
TransactionId xmin = HeapTupleHeaderGetRawXmin(htup); Assert(!HeapTupleHeaderXminFrozen(htup)); @@ -6739,8 +6737,7 @@ pg_tde_freeze_execute_prepared(Relation rel, Buffer buffer, HeapTupleHeader htup; htup = (HeapTupleHeader) PageGetItem(page, itemid); - // TODO: Decryption/encryption here - pg_tde_execute_freeze_tuple(htup, frz); + heap_execute_freeze_tuple(htup, frz); } MarkBufferDirty(buffer); @@ -6748,14 +6745,14 @@ pg_tde_freeze_execute_prepared(Relation rel, Buffer buffer, /* Now WAL-log freezing if necessary */ if (RelationNeedsWAL(rel)) { - xl_pg_tde_freeze_plan plans[MaxHeapTuplesPerPage]; + xl_heap_freeze_plan plans[MaxHeapTuplesPerPage]; OffsetNumber offsets[MaxHeapTuplesPerPage]; int nplans; - xl_pg_tde_freeze_page xlrec; + xl_heap_freeze_page xlrec; XLogRecPtr recptr; /* Prepare deduplicated representation for use in WAL record */ - nplans = pg_tde_log_freeze_plan(tuples, ntuples, plans, offsets); + nplans = heap_log_freeze_plan(tuples, ntuples, plans, offsets); xlrec.snapshotConflictHorizon = snapshotConflictHorizon; xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(rel); @@ -6771,7 +6768,7 @@ pg_tde_freeze_execute_prepared(Relation rel, Buffer buffer, */ XLogRegisterBuffer(0, buffer, REGBUF_STANDARD); XLogRegisterBufData(0, (char *) plans, - nplans * sizeof(xl_pg_tde_freeze_plan)); + nplans * sizeof(xl_heap_freeze_plan)); XLogRegisterBufData(0, (char *) offsets, ntuples * sizeof(OffsetNumber)); @@ -6787,7 +6784,7 @@ pg_tde_freeze_execute_prepared(Relation rel, Buffer buffer, * Comparator used to deduplicate XLOG_HEAP2_FREEZE_PAGE freeze plans */ static int -pg_tde_log_freeze_cmp(const void *arg1, const void *arg2) +heap_log_freeze_cmp(const void *arg1, const void *arg2) { HeapTupleFreeze *frz1 = (HeapTupleFreeze *) arg1; HeapTupleFreeze *frz2 = (HeapTupleFreeze *) arg2; @@ -6813,7 +6810,7 @@ pg_tde_log_freeze_cmp(const void *arg1, const void *arg2) return 1; /* - * pg_tde_log_freeze_eq would consider these tuple-wise plans to be equal. + * heap_log_freeze_eq would consider these tuple-wise plans to be equal. * (So the tuples will share a single canonical freeze plan.) * * We tiebreak on page offset number to keep each freeze plan's page @@ -6834,7 +6831,7 @@ pg_tde_log_freeze_cmp(const void *arg1, const void *arg2) * caller's plan. */ static inline bool -pg_tde_log_freeze_eq(xl_pg_tde_freeze_plan *plan, HeapTupleFreeze *frz) +heap_log_freeze_eq(xl_heap_freeze_plan *plan, HeapTupleFreeze *frz) { if (plan->xmax == frz->xmax && plan->t_infomask2 == frz->t_infomask2 && @@ -6842,7 +6839,7 @@ pg_tde_log_freeze_eq(xl_pg_tde_freeze_plan *plan, HeapTupleFreeze *frz) plan->frzflags == frz->frzflags) return true; - /* Caller must call pg_tde_log_freeze_new_plan again for frz */ + /* Caller must call heap_log_freeze_new_plan again for frz */ return false; } @@ -6851,7 +6848,7 @@ pg_tde_log_freeze_eq(xl_pg_tde_freeze_plan *plan, HeapTupleFreeze *frz) * will have steps required to freeze described by caller's plan during REDO. */ static inline void -pg_tde_log_freeze_new_plan(xl_pg_tde_freeze_plan *plan, HeapTupleFreeze *frz) +heap_log_freeze_new_plan(xl_heap_freeze_plan *plan, HeapTupleFreeze *frz) { plan->xmax = frz->xmax; plan->t_infomask2 = frz->t_infomask2; @@ -6871,14 +6868,14 @@ pg_tde_log_freeze_new_plan(xl_pg_tde_freeze_plan *plan, HeapTupleFreeze *frz) * concern to our caller). 
*/ static int -pg_tde_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, - xl_pg_tde_freeze_plan *plans_out, +heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, + xl_heap_freeze_plan *plans_out, OffsetNumber *offsets_out) { int nplans = 0; /* Sort tuple-based freeze plans in the order required to deduplicate */ - qsort(tuples, ntuples, sizeof(HeapTupleFreeze), pg_tde_log_freeze_cmp); + qsort(tuples, ntuples, sizeof(HeapTupleFreeze), heap_log_freeze_cmp); for (int i = 0; i < ntuples; i++) { @@ -6887,10 +6884,10 @@ pg_tde_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, if (i == 0) { /* New canonical freeze plan starting with first tup */ - pg_tde_log_freeze_new_plan(plans_out, frz); + heap_log_freeze_new_plan(plans_out, frz); nplans++; } - else if (pg_tde_log_freeze_eq(plans_out, frz)) + else if (heap_log_freeze_eq(plans_out, frz)) { /* tup matches open canonical plan -- include tup in it */ Assert(offsets_out[i - 1] < frz->offset); @@ -6902,7 +6899,7 @@ pg_tde_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, plans_out++; /* New canonical freeze plan starting with this tup */ - pg_tde_log_freeze_new_plan(plans_out, frz); + heap_log_freeze_new_plan(plans_out, frz); nplans++; } @@ -6922,13 +6919,13 @@ pg_tde_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, } /* - * pg_tde_freeze_tuple + * heap_freeze_tuple * Freeze tuple in place, without WAL logging. * * Useful for callers like CLUSTER that perform their own WAL logging. */ bool -pg_tde_freeze_tuple(HeapTupleHeader tuple, +heap_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId FreezeLimit, TransactionId MultiXactCutoff) { @@ -6951,7 +6948,7 @@ pg_tde_freeze_tuple(HeapTupleHeader tuple, pagefrz.NoFreezePageRelfrozenXid = FreezeLimit; pagefrz.NoFreezePageRelminMxid = MultiXactCutoff; - do_freeze = pg_tde_prepare_freeze_tuple(tuple, &cutoffs, + do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs, &pagefrz, &frz, &totally_frozen); /* @@ -6960,7 +6957,7 @@ pg_tde_freeze_tuple(HeapTupleHeader tuple, */ if (do_freeze) - pg_tde_execute_freeze_tuple(tuple, &frz); + heap_execute_freeze_tuple(tuple, &frz); return do_freeze; } @@ -7328,13 +7325,13 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status, } /* - * pg_tde_tuple_needs_eventual_freeze + * heap_tuple_needs_eventual_freeze * * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) * will eventually require freezing (if tuple isn't removed by pruning first). */ bool -pg_tde_tuple_needs_eventual_freeze(HeapTupleHeader tuple) +heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple) { TransactionId xid; @@ -7375,9 +7372,9 @@ pg_tde_tuple_needs_eventual_freeze(HeapTupleHeader tuple) } /* - * pg_tde_tuple_should_freeze + * heap_tuple_should_freeze * - * Return value indicates if pg_tde_prepare_freeze_tuple sibling function would + * Return value indicates if heap_prepare_freeze_tuple sibling function would * (or should) force freezing of the heap page that contains caller's tuple. * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing. * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs. @@ -7389,7 +7386,7 @@ pg_tde_tuple_needs_eventual_freeze(HeapTupleHeader tuple) * point that it fully commits to not freezing the tuple/page in question. 
*/ bool -pg_tde_tuple_should_freeze(HeapTupleHeader tuple, +heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid) @@ -7435,7 +7432,7 @@ pg_tde_tuple_should_freeze(HeapTupleHeader tuple, /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */ if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid)) *NoFreezePageRelminMxid = multi; - /* pg_tde_prepare_freeze_tuple always freezes pg_upgrade'd xmax */ + /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */ freeze = true; } else @@ -7475,7 +7472,7 @@ pg_tde_tuple_should_freeze(HeapTupleHeader tuple, Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid)); if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid)) *NoFreezePageRelfrozenXid = xid; - /* pg_tde_prepare_freeze_tuple forces xvac freezing */ + /* heap_prepare_freeze_tuple forces xvac freezing */ freeze = true; } } @@ -7527,7 +7524,7 @@ HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, #ifdef USE_PREFETCH /* - * Helper function for pg_tde_index_delete_tuples. Issues prefetch requests for + * Helper function for heap_index_delete_tuples. Issues prefetch requests for * prefetch_count buffers. The prefetch_state keeps track of all the buffers * we can prefetch, and which have already been prefetched; each call to this * function picks up where the previous call left off. @@ -7572,7 +7569,7 @@ index_delete_prefetch_buffer(Relation rel, #endif /* - * Helper function for pg_tde_index_delete_tuples. Checks for index corruption + * Helper function for heap_index_delete_tuples. Checks for index corruption * involving an invalid TID in index AM caller's index page. * * This is an ideal place for these checks. The index AM must hold a buffer @@ -7616,7 +7613,6 @@ index_delete_check_htid(TM_IndexDeleteOp *delstate, Assert(ItemIdIsNormal(iid)); htup = (HeapTupleHeader) PageGetItem(page, iid); - // TODO: Decryption/encryption here if (unlikely(HeapTupleHeaderIsHeapOnly(htup))) ereport(ERROR, @@ -7643,7 +7639,7 @@ index_delete_check_htid(TM_IndexDeleteOp *delstate, * the same heap block. */ TransactionId -pg_tde_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) +heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) { /* Initial assumption is that earlier pruning took care of conflict */ TransactionId snapshotConflictHorizon = InvalidTransactionId; @@ -7839,7 +7835,7 @@ pg_tde_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) HeapTupleData heapTuple; /* Are any tuples from this HOT chain non-vacuumable? */ - if (pg_tde_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable, + if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable, &heapTuple, NULL, true)) continue; /* can't delete entry */ @@ -7904,7 +7900,6 @@ pg_tde_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate) break; htup = (HeapTupleHeader) PageGetItem(page, lp); - // TODO: Decryption/encryption here /* * Check the tuple XMIN against prior XMAX, if any @@ -7980,7 +7975,7 @@ index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2) /* * Sort deltids array from delstate by TID. This prepares it for further - * processing by pg_tde_index_delete_tuples(). + * processing by heap_index_delete_tuples(). * * This operation becomes a noticeable consumer of CPU cycles with some * workloads, so we go to the trouble of specialization/micro optimization. 
@@ -8033,10 +8028,10 @@ index_delete_sort(TM_IndexDeleteOp *delstate) * deletion. In the worst case (i.e. with totally random heap blocks) the * first block in line (the only favorable block) can be thought of as a * degenerate array of contiguous blocks that consists of a single block. - * pg_tde_index_delete_tuples() will expect this. + * heap_index_delete_tuples() will expect this. * * Caller passes blockgroups, a description of the final order that deltids - * will be sorted in for pg_tde_index_delete_tuples() bottom-up index deletion + * will be sorted in for heap_index_delete_tuples() bottom-up index deletion * processing. Note that deltids need not actually be sorted just yet (caller * only passes deltids to us so that we can interpret blockgroups). * @@ -8077,7 +8072,7 @@ index_delete_sort(TM_IndexDeleteOp *delstate) * the indexes are logically modified by the UPDATE statements (if they were * then bottom-up index deletion would not be triggered in the first place). * Naturally, each new round of index tuples (for each heap tuple that gets a - * pg_tde_update() call) will have the same heap TID in each and every index. + * heap_update() call) will have the same heap TID in each and every index. * Since these indexes are low cardinality and never get logically modified, * heapam processing during bottom-up deletion passes will access heap blocks * in approximately sequential order. Temporal locality of access occurs due @@ -8184,7 +8179,7 @@ bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2) } /* - * pg_tde_index_delete_tuples() helper function for bottom-up deletion callers. + * heap_index_delete_tuples() helper function for bottom-up deletion callers. * * Sorts deltids array in the order needed for useful processing by bottom-up * deletion. The array should already be sorted in TID order when we're @@ -8326,19 +8321,19 @@ bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate) * marked all-visible. REDO routine uses it to generate recovery conflicts. * * If checksums or wal_log_hints are enabled, we may also generate a full-page - * image of pg_tde_buffer. Otherwise, we optimize away the FPI (by specifying + * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not* * update the heap page's LSN. */ XLogRecPtr -log_pg_tde_visible(Relation rel, Buffer pg_tde_buffer, Buffer vm_buffer, +log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer, TransactionId snapshotConflictHorizon, uint8 vmflags) { - xl_pg_tde_visible xlrec; + xl_heap_visible xlrec; XLogRecPtr recptr; uint8 flags; - Assert(BufferIsValid(pg_tde_buffer)); + Assert(BufferIsValid(heap_buffer)); Assert(BufferIsValid(vm_buffer)); xlrec.snapshotConflictHorizon = snapshotConflictHorizon; @@ -8353,7 +8348,7 @@ log_pg_tde_visible(Relation rel, Buffer pg_tde_buffer, Buffer vm_buffer, flags = REGBUF_STANDARD; if (!XLogHintBitIsNeeded()) flags |= REGBUF_NO_IMAGE; - XLogRegisterBuffer(1, pg_tde_buffer, flags); + XLogRegisterBuffer(1, heap_buffer, flags); recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE); @@ -8365,14 +8360,14 @@ log_pg_tde_visible(Relation rel, Buffer pg_tde_buffer, Buffer vm_buffer, * have modified the buffer(s) and marked them dirty. 
*/ static XLogRecPtr -log_pg_tde_update(Relation reln, Buffer oldbuf, +log_heap_update(Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared) { - xl_pg_tde_update xlrec; - xl_pg_tde_header xlhdr; - xl_pg_tde_header xlhdr_idx; + xl_heap_update xlrec; + xl_heap_header xlhdr; + xl_heap_header xlhdr_idx; uint8 info; uint16 prefix_suffix[2]; uint16 prefixlen = 0, @@ -8587,9 +8582,9 @@ log_pg_tde_update(Relation reln, Buffer oldbuf, * tuples. */ static XLogRecPtr -log_pg_tde_new_cid(Relation relation, HeapTuple tup) +log_heap_new_cid(Relation relation, HeapTuple tup) { - xl_pg_tde_new_cid xlrec; + xl_heap_new_cid xlrec; XLogRecPtr recptr; HeapTupleHeader hdr = tup->t_data; @@ -8710,8 +8705,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, /* * If there's no defined replica identity columns, treat as !key_required. - * (This case should not be reachable from pg_tde_update, since that should - * calculate key_required accurately. But pg_tde_delete just passes + * (This case should not be reachable from heap_update, since that should + * calculate key_required accurately. But heap_delete just passes * constant true for key_required, so we can hit this case in deletes.) */ if (bms_is_empty(idattrs)) @@ -8722,7 +8717,7 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, * with nulls elsewhere. While we're at it, assert that the replica * identity columns aren't null. */ - pg_tde_deform_tuple(tp, desc, values, nulls); + heap_deform_tuple(tp, desc, values, nulls); for (int i = 0; i < desc->natts; i++) { @@ -8733,7 +8728,7 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, nulls[i] = true; } - key_tuple = pg_tde_form_tuple(desc, values, nulls); + key_tuple = heap_form_tuple(desc, values, nulls); *copy = true; bms_free(idattrs); @@ -8750,7 +8745,7 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, HeapTuple oldtup = key_tuple; key_tuple = toast_flatten_tuple(oldtup, desc); - pg_tde_freetuple(oldtup); + heap_freetuple(oldtup); } return key_tuple; @@ -8762,10 +8757,10 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required, * Acquires a full cleanup lock. */ static void -pg_tde_xlog_prune(XLogReaderState *record) +heap_xlog_prune(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_prune *xlrec = (xl_pg_tde_prune *) XLogRecGetData(record); + xl_heap_prune *xlrec = (xl_heap_prune *) XLogRecGetData(record); Buffer buffer; RelFileLocator rlocator; BlockNumber blkno; @@ -8811,7 +8806,7 @@ pg_tde_xlog_prune(XLogReaderState *record) Assert(nunused >= 0); /* Update all line pointers per the record, and repair fragmentation */ - pg_tde_page_prune_execute(buffer, + heap_page_prune_execute(buffer, redirected, nredirected, nowdead, ndead, nowunused, nunused); @@ -8850,10 +8845,10 @@ pg_tde_xlog_prune(XLogReaderState *record) * Acquires an ordinary exclusive lock only. */ static void -pg_tde_xlog_vacuum(XLogReaderState *record) +heap_xlog_vacuum(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_vacuum *xlrec = (xl_pg_tde_vacuum *) XLogRecGetData(record); + xl_heap_vacuum *xlrec = (xl_heap_vacuum *) XLogRecGetData(record); Buffer buffer; BlockNumber blkno; XLogRedoAction action; @@ -8925,10 +8920,10 @@ pg_tde_xlog_vacuum(XLogReaderState *record) * page modification would fail to clear the visibility map bit. 
*/ static void -pg_tde_xlog_visible(XLogReaderState *record) +heap_xlog_visible(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_visible *xlrec = (xl_pg_tde_visible *) XLogRecGetData(record); + xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record); Buffer vmbuffer = InvalidBuffer; Buffer buffer; Page page; @@ -9036,14 +9031,14 @@ pg_tde_xlog_visible(XLogReaderState *record) /* * XLogReadBufferForRedoExtended locked the buffer. But - * pg_tde_visibilitymap_set will handle locking itself. + * visibilitymap_set will handle locking itself. */ LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK); reln = CreateFakeRelcacheEntry(rlocator); - pg_tde_visibilitymap_pin(reln, blkno, &vmbuffer); + visibilitymap_pin(reln, blkno, &vmbuffer); - pg_tde_visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer, + visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer, xlrec->snapshotConflictHorizon, vmbits); ReleaseBuffer(vmbuffer); @@ -9057,10 +9052,10 @@ pg_tde_xlog_visible(XLogReaderState *record) * Replay XLOG_HEAP2_FREEZE_PAGE records */ static void -pg_tde_xlog_freeze_page(XLogReaderState *record) +heap_xlog_freeze_page(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_freeze_page *xlrec = (xl_pg_tde_freeze_page *) XLogRecGetData(record); + xl_heap_freeze_page *xlrec = (xl_heap_freeze_page *) XLogRecGetData(record); Buffer buffer; /* @@ -9080,21 +9075,21 @@ pg_tde_xlog_freeze_page(XLogReaderState *record) if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO) { Page page = BufferGetPage(buffer); - xl_pg_tde_freeze_plan *plans; + xl_heap_freeze_plan *plans; OffsetNumber *offsets; int curoff = 0; - plans = (xl_pg_tde_freeze_plan *) XLogRecGetBlockData(record, 0, NULL); + plans = (xl_heap_freeze_plan *) XLogRecGetBlockData(record, 0, NULL); offsets = (OffsetNumber *) ((char *) plans + (xlrec->nplans * - sizeof(xl_pg_tde_freeze_plan))); + sizeof(xl_heap_freeze_plan))); for (int p = 0; p < xlrec->nplans; p++) { HeapTupleFreeze frz; /* * Convert freeze plan representation from WAL record into - * per-tuple format used by pg_tde_execute_freeze_tuple + * per-tuple format used by heap_execute_freeze_tuple */ frz.xmax = plans[p].xmax; frz.t_infomask2 = plans[p].t_infomask2; @@ -9110,8 +9105,7 @@ pg_tde_xlog_freeze_page(XLogReaderState *record) lp = PageGetItemId(page, offset); tuple = (HeapTupleHeader) PageGetItem(page, lp); - // TODO: Decryption/encryption here - pg_tde_execute_freeze_tuple(tuple, &frz); + heap_execute_freeze_tuple(tuple, &frz); } } @@ -9150,10 +9144,10 @@ fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2) } static void -pg_tde_xlog_delete(XLogReaderState *record) +heap_xlog_delete(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_delete *xlrec = (xl_pg_tde_delete *) XLogRecGetData(record); + xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record); Buffer buffer; Page page; ItemId lp = NULL; @@ -9175,8 +9169,8 @@ pg_tde_xlog_delete(XLogReaderState *record) Relation reln = CreateFakeRelcacheEntry(target_locator); Buffer vmbuffer = InvalidBuffer; - pg_tde_visibilitymap_pin(reln, blkno, &vmbuffer); - pg_tde_visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS); + visibilitymap_pin(reln, blkno, &vmbuffer); + visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS); ReleaseBuffer(vmbuffer); FreeFakeRelcacheEntry(reln); } @@ -9192,7 +9186,6 @@ pg_tde_xlog_delete(XLogReaderState *record) elog(PANIC, "invalid lp"); htup = (HeapTupleHeader) 
PageGetItem(page, lp); - // TODO: Decryption/encryption here htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); htup->t_infomask2 &= ~HEAP_KEYS_UPDATED; @@ -9224,10 +9217,10 @@ pg_tde_xlog_delete(XLogReaderState *record) } static void -pg_tde_xlog_insert(XLogReaderState *record) +heap_xlog_insert(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_insert *xlrec = (xl_pg_tde_insert *) XLogRecGetData(record); + xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record); Buffer buffer; Page page; union @@ -9236,7 +9229,7 @@ pg_tde_xlog_insert(XLogReaderState *record) char data[MaxHeapTupleSize]; } tbuf; HeapTupleHeader htup; - xl_pg_tde_header xlhdr; + xl_heap_header xlhdr; uint32 newlen; Size freespace = 0; RelFileLocator target_locator; @@ -9257,8 +9250,8 @@ pg_tde_xlog_insert(XLogReaderState *record) Relation reln = CreateFakeRelcacheEntry(target_locator); Buffer vmbuffer = InvalidBuffer; - pg_tde_visibilitymap_pin(reln, blkno, &vmbuffer); - pg_tde_visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS); + visibilitymap_pin(reln, blkno, &vmbuffer); + visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS); ReleaseBuffer(vmbuffer); FreeFakeRelcacheEntry(reln); } @@ -9344,10 +9337,10 @@ pg_tde_xlog_insert(XLogReaderState *record) * Handles MULTI_INSERT record type. */ static void -pg_tde_xlog_multi_insert(XLogReaderState *record) +heap_xlog_multi_insert(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_multi_insert *xlrec; + xl_heap_multi_insert *xlrec; RelFileLocator rlocator; BlockNumber blkno; Buffer buffer; @@ -9368,7 +9361,7 @@ pg_tde_xlog_multi_insert(XLogReaderState *record) * Insertion doesn't overwrite MVCC data, so no conflict processing is * required. */ - xlrec = (xl_pg_tde_multi_insert *) XLogRecGetData(record); + xlrec = (xl_heap_multi_insert *) XLogRecGetData(record); XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno); @@ -9385,8 +9378,8 @@ pg_tde_xlog_multi_insert(XLogReaderState *record) Relation reln = CreateFakeRelcacheEntry(rlocator); Buffer vmbuffer = InvalidBuffer; - pg_tde_visibilitymap_pin(reln, blkno, &vmbuffer); - pg_tde_visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS); + visibilitymap_pin(reln, blkno, &vmbuffer); + visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS); ReleaseBuffer(vmbuffer); FreeFakeRelcacheEntry(reln); } @@ -9491,10 +9484,10 @@ pg_tde_xlog_multi_insert(XLogReaderState *record) * Handles UPDATE and HOT_UPDATE */ static void -pg_tde_xlog_update(XLogReaderState *record, bool hot_update) +heap_xlog_update(XLogReaderState *record, bool hot_update) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_update *xlrec = (xl_pg_tde_update *) XLogRecGetData(record); + xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record); RelFileLocator rlocator; BlockNumber oldblk; BlockNumber newblk; @@ -9514,7 +9507,7 @@ pg_tde_xlog_update(XLogReaderState *record, bool hot_update) HeapTupleHeaderData hdr; char data[MaxHeapTupleSize]; } tbuf; - xl_pg_tde_header xlhdr; + xl_heap_header xlhdr; uint32 newlen; Size freespace = 0; XLogRedoAction oldaction; @@ -9544,8 +9537,8 @@ pg_tde_xlog_update(XLogReaderState *record, bool hot_update) Relation reln = CreateFakeRelcacheEntry(rlocator); Buffer vmbuffer = InvalidBuffer; - pg_tde_visibilitymap_pin(reln, oldblk, &vmbuffer); - pg_tde_visibilitymap_clear(reln, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS); + visibilitymap_pin(reln, oldblk, &vmbuffer); + visibilitymap_clear(reln, oldblk, vmbuffer, 
VISIBILITYMAP_VALID_BITS); ReleaseBuffer(vmbuffer); FreeFakeRelcacheEntry(reln); } @@ -9574,7 +9567,6 @@ pg_tde_xlog_update(XLogReaderState *record, bool hot_update) elog(PANIC, "invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); - // TODO: Decryption/encryption here oldtup.t_data = htup; oldtup.t_len = ItemIdGetLength(lp); @@ -9629,8 +9621,8 @@ pg_tde_xlog_update(XLogReaderState *record, bool hot_update) Relation reln = CreateFakeRelcacheEntry(rlocator); Buffer vmbuffer = InvalidBuffer; - pg_tde_visibilitymap_pin(reln, newblk, &vmbuffer); - pg_tde_visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS); + visibilitymap_pin(reln, newblk, &vmbuffer); + visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS); ReleaseBuffer(vmbuffer); FreeFakeRelcacheEntry(reln); } @@ -9764,10 +9756,10 @@ pg_tde_xlog_update(XLogReaderState *record, bool hot_update) } static void -pg_tde_xlog_confirm(XLogReaderState *record) +heap_xlog_confirm(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_confirm *xlrec = (xl_pg_tde_confirm *) XLogRecGetData(record); + xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record); Buffer buffer; Page page; OffsetNumber offnum; @@ -9786,7 +9778,6 @@ pg_tde_xlog_confirm(XLogReaderState *record) elog(PANIC, "invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); - // TODO: Decryption/encryption here /* * Confirm tuple as actually inserted @@ -9801,10 +9792,10 @@ pg_tde_xlog_confirm(XLogReaderState *record) } static void -pg_tde_xlog_lock(XLogReaderState *record) +heap_xlog_lock(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_lock *xlrec = (xl_pg_tde_lock *) XLogRecGetData(record); + xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record); Buffer buffer; Page page; OffsetNumber offnum; @@ -9825,8 +9816,8 @@ pg_tde_xlog_lock(XLogReaderState *record) XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block); reln = CreateFakeRelcacheEntry(rlocator); - pg_tde_visibilitymap_pin(reln, block, &vmbuffer); - pg_tde_visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN); + visibilitymap_pin(reln, block, &vmbuffer); + visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN); ReleaseBuffer(vmbuffer); FreeFakeRelcacheEntry(reln); @@ -9844,7 +9835,6 @@ pg_tde_xlog_lock(XLogReaderState *record) elog(PANIC, "invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); - // TODO: Decryption/encryption here htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); htup->t_infomask2 &= ~HEAP_KEYS_UPDATED; @@ -9873,17 +9863,17 @@ pg_tde_xlog_lock(XLogReaderState *record) } static void -pg_tde_xlog_lock_updated(XLogReaderState *record) +heap_xlog_lock_updated(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_lock_updated *xlrec; + xl_heap_lock_updated *xlrec; Buffer buffer; Page page; OffsetNumber offnum; ItemId lp = NULL; HeapTupleHeader htup; - xlrec = (xl_pg_tde_lock_updated *) XLogRecGetData(record); + xlrec = (xl_heap_lock_updated *) XLogRecGetData(record); /* * The visibility map may need to be fixed even if the heap page is @@ -9899,8 +9889,8 @@ pg_tde_xlog_lock_updated(XLogReaderState *record) XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block); reln = CreateFakeRelcacheEntry(rlocator); - pg_tde_visibilitymap_pin(reln, block, &vmbuffer); - pg_tde_visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN); + visibilitymap_pin(reln, block, &vmbuffer); + visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN); 
ReleaseBuffer(vmbuffer); FreeFakeRelcacheEntry(reln); @@ -9918,7 +9908,6 @@ pg_tde_xlog_lock_updated(XLogReaderState *record) elog(PANIC, "invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); - // TODO: Decryption/encryption here htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED); htup->t_infomask2 &= ~HEAP_KEYS_UPDATED; @@ -9934,10 +9923,10 @@ pg_tde_xlog_lock_updated(XLogReaderState *record) } static void -pg_tde_xlog_inplace(XLogReaderState *record) +heap_xlog_inplace(XLogReaderState *record) { XLogRecPtr lsn = record->EndRecPtr; - xl_pg_tde_inplace *xlrec = (xl_pg_tde_inplace *) XLogRecGetData(record); + xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record); Buffer buffer; Page page; OffsetNumber offnum; @@ -9960,7 +9949,6 @@ pg_tde_xlog_inplace(XLogReaderState *record) elog(PANIC, "invalid lp"); htup = (HeapTupleHeader) PageGetItem(page, lp); - // TODO: Decryption/encryption here oldlen = ItemIdGetLength(lp) - htup->t_hoff; if (oldlen != newlen) @@ -9976,7 +9964,7 @@ pg_tde_xlog_inplace(XLogReaderState *record) } void -pg_tde_redo(XLogReaderState *record) +heap_redo(XLogReaderState *record) { uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; @@ -9988,13 +9976,13 @@ pg_tde_redo(XLogReaderState *record) switch (info & XLOG_HEAP_OPMASK) { case XLOG_HEAP_INSERT: - pg_tde_xlog_insert(record); + heap_xlog_insert(record); break; case XLOG_HEAP_DELETE: - pg_tde_xlog_delete(record); + heap_xlog_delete(record); break; case XLOG_HEAP_UPDATE: - pg_tde_xlog_update(record, false); + heap_xlog_update(record, false); break; case XLOG_HEAP_TRUNCATE: @@ -10005,46 +9993,46 @@ pg_tde_redo(XLogReaderState *record) */ break; case XLOG_HEAP_HOT_UPDATE: - pg_tde_xlog_update(record, true); + heap_xlog_update(record, true); break; case XLOG_HEAP_CONFIRM: - pg_tde_xlog_confirm(record); + heap_xlog_confirm(record); break; case XLOG_HEAP_LOCK: - pg_tde_xlog_lock(record); + heap_xlog_lock(record); break; case XLOG_HEAP_INPLACE: - pg_tde_xlog_inplace(record); + heap_xlog_inplace(record); break; default: - elog(PANIC, "pg_tde_redo: unknown op code %u", info); + elog(PANIC, "heap_redo: unknown op code %u", info); } } void -pg_tde2_redo(XLogReaderState *record) +heap2_redo(XLogReaderState *record) { uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; switch (info & XLOG_HEAP_OPMASK) { case XLOG_HEAP2_PRUNE: - pg_tde_xlog_prune(record); + heap_xlog_prune(record); break; case XLOG_HEAP2_VACUUM: - pg_tde_xlog_vacuum(record); + heap_xlog_vacuum(record); break; case XLOG_HEAP2_FREEZE_PAGE: - pg_tde_xlog_freeze_page(record); + heap_xlog_freeze_page(record); break; case XLOG_HEAP2_VISIBLE: - pg_tde_xlog_visible(record); + heap_xlog_visible(record); break; case XLOG_HEAP2_MULTI_INSERT: - pg_tde_xlog_multi_insert(record); + heap_xlog_multi_insert(record); break; case XLOG_HEAP2_LOCK_UPDATED: - pg_tde_xlog_lock_updated(record); + heap_xlog_lock_updated(record); break; case XLOG_HEAP2_NEW_CID: @@ -10054,7 +10042,7 @@ pg_tde2_redo(XLogReaderState *record) */ break; case XLOG_HEAP2_REWRITE: - pg_tde_xlog_logical_rewrite(record); + heap_xlog_logical_rewrite(record); break; default: elog(PANIC, "heap2_redo: unknown op code %u", info); @@ -10065,7 +10053,7 @@ pg_tde2_redo(XLogReaderState *record) * Mask a heap page before performing consistency checks on it. 
*/ void -pg_tde_mask(char *pagedata, BlockNumber blkno) +heap_mask(char *pagedata, BlockNumber blkno) { Page page = (Page) pagedata; OffsetNumber off; @@ -10102,18 +10090,18 @@ pg_tde_mask(char *pagedata, BlockNumber blkno) /* * During replay, we set Command Id to FirstCommandId. Hence, mask - * it. See pg_tde_xlog_insert() for details. + * it. See heap_xlog_insert() for details. */ page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER; /* - * For a speculative tuple, pg_tde_insert() does not set ctid in the + * For a speculative tuple, heap_insert() does not set ctid in the * caller-passed heap tuple itself, leaving the ctid field to * contain a speculative token value - a per-backend monotonically * increasing identifier. Besides, it does not WAL-log ctid under * any circumstances. * - * During redo, pg_tde_xlog_insert() sets t_ctid to current block + * During redo, heap_xlog_insert() sets t_ctid to current block * number and self offset number. It doesn't care about any * speculative insertions on the primary. Hence, we set t_ctid to * current block number and self offset number to ignore any diff --git a/src/access/pg_tdeam_handler.c b/src/access/heapam_handler.c similarity index 91% rename from src/access/pg_tdeam_handler.c rename to src/access/heapam_handler.c index 23a6514b..eab11641 100644 --- a/src/access/pg_tdeam_handler.c +++ b/src/access/heapam_handler.c @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * pg_tdeam_handler.c + * heapam_handler.c * heap table access method code * * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * src/backend/access/heap/pg_tdeam_handler.c + * src/backend/access/heap/heapam_handler.c * * * NOTES @@ -17,20 +17,14 @@ * *------------------------------------------------------------------------- */ - -#include "pg_tde_defines.h" - #include "postgres.h" -#include "access/pg_tdeam.h" -#include "access/pg_tdetoast.h" -#include "access/pg_tde_rewrite.h" -#include "access/pg_tde_tdemap.h" - -#include "encryption/enc_tuple.h" - #include "access/genam.h" +#include "access/heapam.h" +#include "access/heaptoast.h" #include "access/multixact.h" +#include "access/rewriteheap.h" +#include "access/pg_tde_tdemap.h" #include "access/syncscan.h" #include "access/tableam.h" #include "access/tsmapi.h" @@ -41,6 +35,7 @@ #include "catalog/storage_xlog.h" #include "commands/progress.h" #include "executor/executor.h" +#include "encryption/enc_tuple.h" #include "miscadmin.h" #include "pgstat.h" #include "storage/bufmgr.h" @@ -54,7 +49,6 @@ PG_FUNCTION_INFO_V1(pg_tdeam_handler); - static void reform_and_rewrite_tuple(HeapTuple tuple, Relation OldHeap, Relation NewHeap, Datum *values, bool *isnull, RewriteState rwstate); @@ -63,9 +57,9 @@ static bool SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer, HeapTuple tuple, OffsetNumber tupoffset); -static BlockNumber pg_tdeam_scan_get_blocks_done(HeapScanDesc hscan); +static BlockNumber heapam_scan_get_blocks_done(HeapScanDesc hscan); -static const TableAmRoutine pg_tdeam_methods; +static const TableAmRoutine heapam_methods; /* ------------------------------------------------------------------------ @@ -74,7 +68,7 @@ static const TableAmRoutine pg_tdeam_methods; */ static const TupleTableSlotOps * -pg_tdeam_slot_callbacks(Relation relation) +heapam_slot_callbacks(Relation relation) { return &TTSOpsBufferHeapTuple; } @@ -86,7 +80,7 @@ pg_tdeam_slot_callbacks(Relation relation) */ static IndexFetchTableData * 
-pg_tdeam_index_fetch_begin(Relation rel) +heapam_index_fetch_begin(Relation rel) { IndexFetchHeapData *hscan = palloc0(sizeof(IndexFetchHeapData)); @@ -97,7 +91,7 @@ pg_tdeam_index_fetch_begin(Relation rel) } static void -pg_tdeam_index_fetch_reset(IndexFetchTableData *scan) +heapam_index_fetch_reset(IndexFetchTableData *scan) { IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan; @@ -109,17 +103,17 @@ pg_tdeam_index_fetch_reset(IndexFetchTableData *scan) } static void -pg_tdeam_index_fetch_end(IndexFetchTableData *scan) +heapam_index_fetch_end(IndexFetchTableData *scan) { IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan; - pg_tdeam_index_fetch_reset(scan); + heapam_index_fetch_reset(scan); pfree(hscan); } static bool -pg_tdeam_index_fetch_tuple(struct IndexFetchTableData *scan, +heapam_index_fetch_tuple(struct IndexFetchTableData *scan, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, @@ -127,7 +121,7 @@ pg_tdeam_index_fetch_tuple(struct IndexFetchTableData *scan, { IndexFetchHeapData *hscan = (IndexFetchHeapData *) scan; BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot; - bool got_pg_tde_tuple; + bool got_heap_tuple; Assert(TTS_IS_BUFFERTUPLE(slot)); @@ -145,12 +139,12 @@ pg_tdeam_index_fetch_tuple(struct IndexFetchTableData *scan, * Prune page, but only if we weren't already on this page */ if (prev_buf != hscan->xs_cbuf) - pg_tde_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf); + heap_page_prune_opt(hscan->xs_base.rel, hscan->xs_cbuf); } /* Obtain share-lock on the buffer so we can examine visibility */ LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_SHARE); - got_pg_tde_tuple = pg_tde_hot_search_buffer(tid, + got_heap_tuple = heap_hot_search_buffer(tid, hscan->xs_base.rel, hscan->xs_cbuf, snapshot, @@ -160,7 +154,7 @@ pg_tdeam_index_fetch_tuple(struct IndexFetchTableData *scan, bslot->base.tupdata.t_self = *tid; LockBuffer(hscan->xs_cbuf, BUFFER_LOCK_UNLOCK); - if (got_pg_tde_tuple) + if (got_heap_tuple) { /* * Only in a non-MVCC snapshot can more than one member of the HOT @@ -177,7 +171,7 @@ pg_tdeam_index_fetch_tuple(struct IndexFetchTableData *scan, *call_again = false; } - return got_pg_tde_tuple; + return got_heap_tuple; } @@ -187,7 +181,7 @@ pg_tdeam_index_fetch_tuple(struct IndexFetchTableData *scan, */ static bool -pg_tdeam_fetch_row_version(Relation relation, +heapam_fetch_row_version(Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot) @@ -198,7 +192,7 @@ pg_tdeam_fetch_row_version(Relation relation, Assert(TTS_IS_BUFFERTUPLE(slot)); bslot->base.tupdata.t_self = *tid; - if (pg_tde_fetch(relation, snapshot, &bslot->base.tupdata, &buffer, false)) + if (heap_fetch(relation, snapshot, &bslot->base.tupdata, &buffer, false)) { /* store in slot, transferring existing pin */ PGTdeExecStorePinnedBufferHeapTuple(relation, &bslot->base.tupdata, slot, buffer); @@ -211,7 +205,7 @@ pg_tdeam_fetch_row_version(Relation relation, } static bool -pg_tdeam_tuple_tid_valid(TableScanDesc scan, ItemPointer tid) +heapam_tuple_tid_valid(TableScanDesc scan, ItemPointer tid) { HeapScanDesc hscan = (HeapScanDesc) scan; @@ -220,7 +214,7 @@ pg_tdeam_tuple_tid_valid(TableScanDesc scan, ItemPointer tid) } static bool -pg_tdeam_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, +heapam_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, Snapshot snapshot) { BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot; @@ -248,7 +242,7 @@ pg_tdeam_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot, */ static void 
-pg_tdeam_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid, +heapam_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate) { bool shouldFree = true; @@ -259,7 +253,7 @@ pg_tdeam_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid, tuple->t_tableOid = slot->tts_tableOid; /* Perform the insertion, and copy the resulting ItemPointer */ - pg_tde_insert(relation, tuple, cid, options, bistate); + heap_insert(relation, tuple, cid, options, bistate); ItemPointerCopy(&tuple->t_self, &slot->tts_tid); if (shouldFree) @@ -267,7 +261,7 @@ pg_tdeam_tuple_insert(Relation relation, TupleTableSlot *slot, CommandId cid, } static void -pg_tdeam_tuple_insert_speculative(Relation relation, TupleTableSlot *slot, +heapam_tuple_insert_speculative(Relation relation, TupleTableSlot *slot, CommandId cid, int options, BulkInsertState bistate, uint32 specToken) { @@ -282,7 +276,7 @@ pg_tdeam_tuple_insert_speculative(Relation relation, TupleTableSlot *slot, options |= HEAP_INSERT_SPECULATIVE; /* Perform the insertion, and copy the resulting ItemPointer */ - pg_tde_insert(relation, tuple, cid, options, bistate); + heap_insert(relation, tuple, cid, options, bistate); ItemPointerCopy(&tuple->t_self, &slot->tts_tid); if (shouldFree) @@ -290,7 +284,7 @@ pg_tdeam_tuple_insert_speculative(Relation relation, TupleTableSlot *slot, } static void -pg_tdeam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot, +heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot, uint32 specToken, bool succeeded) { bool shouldFree = true; @@ -298,16 +292,16 @@ pg_tdeam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot, /* adjust the tuple's state accordingly */ if (succeeded) - pg_tde_finish_speculative(relation, &slot->tts_tid); + heap_finish_speculative(relation, &slot->tts_tid); else - pg_tde_abort_speculative(relation, &slot->tts_tid); + heap_abort_speculative(relation, &slot->tts_tid); if (shouldFree) pfree(tuple); } static TM_Result -pg_tdeam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid, +heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart) { @@ -316,12 +310,12 @@ pg_tdeam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid, * the storage itself is cleaning the dead tuples by itself, it is the * time to call the index tuple deletion also. 
*/ - return pg_tde_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart); + return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart); } static TM_Result -pg_tdeam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot, +heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot, CommandId cid, Snapshot snapshot, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes) @@ -334,14 +328,14 @@ pg_tdeam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot, slot->tts_tableOid = RelationGetRelid(relation); tuple->t_tableOid = slot->tts_tableOid; - result = pg_tde_update(relation, otid, tuple, cid, crosscheck, wait, + result = heap_update(relation, otid, tuple, cid, crosscheck, wait, tmfd, lockmode, update_indexes); ItemPointerCopy(&tuple->t_self, &slot->tts_tid); /* * Decide whether new index entries are needed for the tuple * - * Note: pg_tde_update returns the tid (location) of the new tuple in the + * Note: heap_update returns the tid (location) of the new tuple in the * t_self field. * * If the update is not HOT, we must update all indexes. If the update is @@ -366,7 +360,7 @@ pg_tdeam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot, } static TM_Result -pg_tdeam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot, +heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot, TupleTableSlot *slot, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, uint8 flags, TM_FailureData *tmfd) @@ -384,7 +378,7 @@ pg_tdeam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot, tuple_lock_retry: tuple->t_self = *tid; - result = pg_tde_lock_tuple(relation, tuple, cid, mode, wait_policy, + result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy, follow_updates, &buffer, tmfd); if (result == TM_Updated && @@ -422,7 +416,7 @@ pg_tdeam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot, errmsg("tuple to be locked was already moved to another partition due to concurrent update"))); tuple->t_self = *tid; - if (pg_tde_fetch(relation, &SnapshotDirty, tuple, &buffer, true)) + if (heap_fetch(relation, &SnapshotDirty, tuple, &buffer, true)) { /* * If xmin isn't what we're expecting, the slot must have @@ -478,14 +472,14 @@ pg_tdeam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot, RelationGetRelationName(relation)))); break; } - continue; /* loop back to repeat pg_tde_fetch */ + continue; /* loop back to repeat heap_fetch */ } /* * If tuple was inserted by our own transaction, we have * to check cmin against cid: cmin >= current CID means * our command cannot see the tuple, so we should ignore - * it. Otherwise pg_tde_lock_tuple() will throw an error, + * it. Otherwise heap_lock_tuple() will throw an error, * and so would any later attempt to update or delete the * tuple. 
(We need not check cmax because * HeapTupleSatisfiesDirty will consider a tuple deleted @@ -587,7 +581,7 @@ pg_tdeam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot, */ static void -pg_tdeam_relation_set_new_filelocator(Relation rel, +heapam_relation_set_new_filelocator(Relation rel, const RelFileLocator *newrlocator, char persistence, TransactionId *freezeXid, @@ -644,13 +638,13 @@ pg_tdeam_relation_set_new_filelocator(Relation rel, } static void -pg_tdeam_relation_nontransactional_truncate(Relation rel) +heapam_relation_nontransactional_truncate(Relation rel) { RelationTruncate(rel, 0); } static void -pg_tdeam_relation_copy_data(Relation rel, const RelFileLocator *newrlocator) +heapam_relation_copy_data(Relation rel, const RelFileLocator *newrlocator) { SMgrRelation dstrel; @@ -705,7 +699,7 @@ pg_tdeam_relation_copy_data(Relation rel, const RelFileLocator *newrlocator) } static void -pg_tdeam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, +heapam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, Relation OldIndex, bool use_sort, TransactionId OldestXmin, TransactionId *xid_cutoff, @@ -744,7 +738,7 @@ pg_tdeam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, isnull = (bool *) palloc(natts * sizeof(bool)); /* Initialize the rewrite operation */ - rwstate = begin_pg_tde_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff, + rwstate = begin_heap_rewrite(OldHeap, NewHeap, OldestXmin, *xid_cutoff, *multi_cutoff); @@ -826,9 +820,9 @@ pg_tdeam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, { /* * If the last pages of the scan were empty, we would go to - * the next phase while pg_tde_blks_scanned != pg_tde_blks_total. - * Instead, to ensure that pg_tde_blks_scanned is equivalent to - * pg_tde_blks_total after the table scan phase, this parameter + * the next phase while heap_blks_scanned != heap_blks_total. + * Instead, to ensure that heap_blks_scanned is equivalent to + * heap_blks_total after the table scan phase, this parameter * is manually updated to the correct value when the table * scan finishes. */ @@ -917,7 +911,7 @@ pg_tdeam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, { *tups_vacuumed += 1; /* heap rewrite module still needs to see it... */ - if (rewrite_pg_tde_dead_tuple(rwstate, tuple)) + if (rewrite_heap_dead_tuple(rwstate, tuple)) { /* A previous recently-dead tuple is now known dead */ *tups_vacuumed += 1; @@ -1008,7 +1002,7 @@ pg_tdeam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, } /* Write out any remaining tuples, and fsync if needed */ - end_pg_tde_rewrite(rwstate); + end_heap_rewrite(rwstate); /* Clean up */ pfree(values); @@ -1016,7 +1010,7 @@ pg_tdeam_relation_copy_for_cluster(Relation OldHeap, Relation NewHeap, } static bool -pg_tdeam_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno, +heapam_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno, BufferAccessStrategy bstrategy) { HeapScanDesc hscan = (HeapScanDesc) scan; @@ -1041,7 +1035,7 @@ pg_tdeam_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno, } static bool -pg_tdeam_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin, +heapam_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin, double *liverows, double *deadrows, TupleTableSlot *slot) { @@ -1069,7 +1063,7 @@ pg_tdeam_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin, * We ignore unused and redirect line pointers. 
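heapam_scan_analyze_next_block/_next_tuple feed ANALYZE's row sampling, and the rule quoted above — ignore unused and redirect line pointers, count DEAD ones as dead rows — has to agree with how pruning counts things so reltuples estimates stay consistent. A toy classification loop, with simplified stand-ins for the line-pointer states:

/* Illustrative only; not the real ItemId/HeapTupleSatisfiesVacuum machinery. */
#include <stdio.h>

typedef enum { LP_UNUSED, LP_NORMAL, LP_REDIRECT, LP_DEAD } LpStatusStub;

static void analyze_block(const LpStatusStub *items, int nitems,
                          double *liverows, double *deadrows)
{
    for (int i = 0; i < nitems; i++)
    {
        switch (items[i])
        {
            case LP_UNUSED:
            case LP_REDIRECT:
                break;              /* ignored, exactly as the comment above says */
            case LP_DEAD:
                *deadrows += 1;     /* needs VACUUM, so counted as dead */
                break;
            case LP_NORMAL:
                /* the real code applies the visibility check here */
                *liverows += 1;
                break;
        }
    }
}

int main(void)
{
    LpStatusStub page[] = {LP_NORMAL, LP_DEAD, LP_REDIRECT, LP_NORMAL, LP_UNUSED};
    double live = 0, dead = 0;
    analyze_block(page, 5, &live, &dead);
    printf("live=%.0f dead=%.0f\n", live, dead);
    return 0;
}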
DEAD line pointers * should be counted as dead, because we need vacuum to run to get rid * of them. Note that this rule agrees with the way that - * pg_tde_page_prune() counts things. + * heap_page_prune() counts things. */ if (!ItemIdIsNormal(itemid)) { @@ -1184,7 +1178,7 @@ pg_tdeam_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin, } static double -pg_tdeam_index_build_range_scan(Relation heapRelation, +heapam_index_build_range_scan(Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, bool allow_sync, @@ -1330,7 +1324,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, /* set our scan endpoints */ if (!allow_sync) - pg_tde_setscanlimits(scan, start_blockno, numblocks); + heap_setscanlimits(scan, start_blockno, numblocks); else { /* syncscan can only be requested on whole relation */ @@ -1343,7 +1337,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, /* * Scan all tuples in the base relation. */ - while ((heapTuple = pg_tde_getnext(scan, ForwardScanDirection)) != NULL) + while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { bool tupleIsAlive; @@ -1352,7 +1346,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, /* Report scan progress, if asked to. */ if (progress) { - BlockNumber blocks_done = pg_tdeam_scan_get_blocks_done(hscan); + BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan); if (blocks_done != previous_blkno) { @@ -1369,7 +1363,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, * the HOT-chain structure in the heap. So we need to be able to find * the root item offset for every tuple that's in a HOT-chain. When * first reaching a new page of the relation, call - * pg_tde_get_root_tuples() to build a map of root item offsets on the + * heap_get_root_tuples() to build a map of root item offsets on the * page. * * It might look unsafe to use this information across buffer @@ -1398,7 +1392,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, Page page = BufferGetPage(hscan->rs_cbuf); LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE); - pg_tde_get_root_tuples(page, root_offsets); + heap_get_root_tuples(page, root_offsets); LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK); root_blkno = hscan->rs_cblock; @@ -1422,7 +1416,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, /* * The criteria for counting a tuple as live in this block need to - * match what analyze.c's pg_tdeam_scan_analyze_next_tuple() does, + * match what analyze.c's heapam_scan_analyze_next_tuple() does, * otherwise CREATE INDEX and ANALYZE may produce wildly different * reltuples values, e.g. when there are many recently-dead * tuples. @@ -1456,7 +1450,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, * index as unusable for them. * * We don't count recently-dead tuples in reltuples, even - * if we index them; see pg_tdeam_scan_analyze_next_tuple(). + * if we index them; see heapam_scan_analyze_next_tuple(). */ if (HeapTupleIsHotUpdated(heapTuple)) { @@ -1521,7 +1515,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, { /* * For consistency with - * pg_tdeam_scan_analyze_next_tuple(), count + * heapam_scan_analyze_next_tuple(), count * HEAPTUPLE_INSERT_IN_PROGRESS tuples as live only * when inserted by our own transaction. */ @@ -1595,7 +1589,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, * Count HEAPTUPLE_DELETE_IN_PROGRESS tuples as live, * if they were not deleted by the current * transaction. 
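During index builds, entries for members of a HOT chain must point at the chain's root line pointer, which is why the scan above calls heap_get_root_tuples() once per page to build a root-offset map before forming index TIDs. A simplified sketch of that remapping (1-based offsets, 0 standing in for InvalidOffsetNumber):

/* Sketch only: a plain array plays the role of the per-page root_offsets map. */
#include <stdio.h>

#define MAX_OFFSETS 32

int main(void)
{
    unsigned root_offsets[MAX_OFFSETS + 1] = {0};

    /* Pretend heap_get_root_tuples() reported that offsets 4 and 5 are HOT
     * members of a chain rooted at line pointer 2. */
    root_offsets[2] = 2;
    root_offsets[4] = 2;
    root_offsets[5] = 2;

    for (unsigned off = 1; off <= 5; off++)
    {
        unsigned root = root_offsets[off];

        if (root == 0)
            printf("offset %u: not in a HOT chain, index TID uses %u\n", off, off);
        else
            printf("offset %u: HOT member, index TID uses root %u\n", off, root);
    }
    return 0;
}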
That's what - * pg_tdeam_scan_analyze_next_tuple() does, and we want + * heapam_scan_analyze_next_tuple() does, and we want * the behavior to be consistent. */ reltuples += 1; @@ -1637,7 +1631,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, } else { - /* pg_tde_getnext did the time qual check */ + /* heap_getnext did the time qual check */ tupleIsAlive = true; reltuples += 1; } @@ -1695,7 +1689,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, Page page = BufferGetPage(hscan->rs_cbuf); LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE); - pg_tde_get_root_tuples(page, root_offsets); + heap_get_root_tuples(page, root_offsets); LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK); } @@ -1759,7 +1753,7 @@ pg_tdeam_index_build_range_scan(Relation heapRelation, } static void -pg_tdeam_index_validate_scan(Relation heapRelation, +heapam_index_validate_scan(Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo, Snapshot snapshot, @@ -1824,7 +1818,7 @@ pg_tdeam_index_validate_scan(Relation heapRelation, /* * Scan all tuples matching the snapshot. */ - while ((heapTuple = pg_tde_getnext(scan, ForwardScanDirection)) != NULL) + while ((heapTuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { ItemPointer heapcursor = &heapTuple->t_self; ItemPointerData rootTuple; @@ -1861,7 +1855,7 @@ pg_tdeam_index_validate_scan(Relation heapRelation, Page page = BufferGetPage(hscan->rs_cbuf); LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_SHARE); - pg_tde_get_root_tuples(page, root_offsets); + heap_get_root_tuples(page, root_offsets); LockBuffer(hscan->rs_cbuf, BUFFER_LOCK_UNLOCK); memset(in_index, 0, sizeof(in_index)); @@ -2006,7 +2000,7 @@ pg_tdeam_index_validate_scan(Relation heapRelation, * further ahead than what we report. */ static BlockNumber -pg_tdeam_scan_get_blocks_done(HeapScanDesc hscan) +heapam_scan_get_blocks_done(HeapScanDesc hscan) { ParallelBlockTableScanDesc bpscan = NULL; BlockNumber startblock; @@ -2051,7 +2045,7 @@ pg_tdeam_scan_get_blocks_done(HeapScanDesc hscan) * create a toast table for something like "f1 varchar(20)".) */ static bool -pg_tdeam_relation_needs_toast_table(Relation rel) +heapam_relation_needs_toast_table(Relation rel) { int32 data_length = 0; bool maxlength_unknown = false; @@ -2099,7 +2093,7 @@ pg_tdeam_relation_needs_toast_table(Relation rel) * TOAST tables for heap relations are just heap relations. */ static Oid -pg_tdeam_relation_toast_am(Relation rel) +heapam_relation_toast_am(Relation rel) { return rel->rd_rel->relam; } @@ -2116,7 +2110,7 @@ pg_tdeam_relation_toast_am(Relation rel) (BLCKSZ - SizeOfPageHeaderData) static void -pg_tdeam_estimate_rel_size(Relation rel, int32 *attr_widths, +heapam_estimate_rel_size(Relation rel, int32 *attr_widths, BlockNumber *pages, double *tuples, double *allvisfrac) { @@ -2133,7 +2127,7 @@ pg_tdeam_estimate_rel_size(Relation rel, int32 *attr_widths, */ static bool -pg_tdeam_scan_bitmap_next_block(TableScanDesc scan, +heapam_scan_bitmap_next_block(TableScanDesc scan, TBMIterateResult *tbmres) { HeapScanDesc hscan = (HeapScanDesc) scan; @@ -2171,7 +2165,7 @@ pg_tdeam_scan_bitmap_next_block(TableScanDesc scan, /* * Prune and repair fragmentation for the whole page, if possible. 
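heapam_relation_needs_toast_table, renamed above, adds up worst-case attribute widths and only asks for a TOAST table when the aligned tuple size could exceed a threshold of roughly a quarter of a block — hence the "f1 varchar(20)" remark. A back-of-the-envelope version of that test; the header size and threshold below are assumptions for an 8 kB block, not values taken from the server headers:

/* Rough sketch of the "needs a TOAST table?" size test. */
#include <stdio.h>
#include <stdbool.h>

#define TYPEALIGN(a, len)   (((len) + (a) - 1) & ~((a) - 1))
#define MAXALIGN(len)       TYPEALIGN(8, (len))
#define BITMAPLEN(natts)    (((natts) + 7) / 8)

#define HEAP_TUPLE_HEADER_SIZE  23      /* assumed */
#define TOAST_TUPLE_THRESHOLD   2032    /* assumed: about BLCKSZ/4 minus overhead */

static bool needs_toast_table(const int *attr_widths, int natts, bool has_varlena)
{
    long data_length = 0;

    for (int i = 0; i < natts; i++)
        data_length += attr_widths[i];

    long tuple_length = MAXALIGN(HEAP_TUPLE_HEADER_SIZE + BITMAPLEN(natts)) +
                        MAXALIGN(data_length);

    return has_varlena && tuple_length > TOAST_TUPLE_THRESHOLD;
}

int main(void)
{
    int short_row[] = {4, 24};            /* e.g. int + varchar(20) */
    int wide_row[]  = {4, 1024, 2048};    /* wide text columns */

    printf("short row: %s\n", needs_toast_table(short_row, 2, true) ? "toast" : "no toast");
    printf("wide row:  %s\n", needs_toast_table(wide_row, 3, true) ? "toast" : "no toast");
    return 0;
}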
*/ - pg_tde_page_prune_opt(scan->rs_rd, buffer); + heap_page_prune_opt(scan->rs_rd, buffer); /* * We must hold share lock on the buffer content while examining tuple @@ -2199,7 +2193,7 @@ pg_tdeam_scan_bitmap_next_block(TableScanDesc scan, HeapTupleData heapTuple; ItemPointerSet(&tid, block, offnum); - if (pg_tde_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot, + if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot, &heapTuple, NULL, true)) hscan->rs_vistuples[ntup++] = ItemPointerGetOffsetNumber(&tid); } @@ -2248,7 +2242,7 @@ pg_tdeam_scan_bitmap_next_block(TableScanDesc scan, } static bool -pg_tdeam_scan_bitmap_next_tuple(TableScanDesc scan, +heapam_scan_bitmap_next_tuple(TableScanDesc scan, TBMIterateResult *tbmres, TupleTableSlot *slot) { @@ -2273,7 +2267,7 @@ pg_tdeam_scan_bitmap_next_tuple(TableScanDesc scan, hscan->rs_ctup.t_tableOid = scan->rs_rd->rd_id; ItemPointerSet(&hscan->rs_ctup.t_self, hscan->rs_cblock, targoffset); - pgstat_count_pg_tde_fetch(scan->rs_rd); + pgstat_count_heap_fetch(scan->rs_rd); /* * Set up the result slot to point to this tuple. Note that the slot @@ -2289,7 +2283,7 @@ pg_tdeam_scan_bitmap_next_tuple(TableScanDesc scan, } static bool -pg_tdeam_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate) +heapam_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate) { HeapScanDesc hscan = (HeapScanDesc) scan; TsmRoutine *tsm = scanstate->tsmroutine; @@ -2356,14 +2350,14 @@ pg_tdeam_scan_sample_next_block(TableScanDesc scan, SampleScanState *scanstate) return false; } - pg_tde_getpage(scan, blockno); + heapgetpage(scan, blockno); hscan->rs_inited = true; return true; } static bool -pg_tdeam_scan_sample_next_tuple(TableScanDesc scan, SampleScanState *scanstate, +heapam_scan_sample_next_tuple(TableScanDesc scan, SampleScanState *scanstate, TupleTableSlot *slot) { HeapScanDesc hscan = (HeapScanDesc) scan; @@ -2436,7 +2430,7 @@ pg_tdeam_scan_sample_next_tuple(TableScanDesc scan, SampleScanState *scanstate, PGTdeExecStoreBufferHeapTuple(scan->rs_rd, tuple, slot, hscan->rs_cbuf); /* Count successfully-fetched tuples as heap fetches */ - pgstat_count_pg_tde_getnext(scan->rs_rd); + pgstat_count_heap_getnext(scan->rs_rd); return true; } @@ -2489,7 +2483,7 @@ reform_and_rewrite_tuple(HeapTuple tuple, HeapTuple copiedTuple; int i; - pg_tde_deform_tuple(tuple, oldTupDesc, values, isnull); + heap_deform_tuple(tuple, oldTupDesc, values, isnull); /* Be sure to null out any dropped columns */ for (i = 0; i < newTupDesc->natts; i++) @@ -2498,12 +2492,12 @@ reform_and_rewrite_tuple(HeapTuple tuple, isnull[i] = true; } - copiedTuple = pg_tde_form_tuple(newTupDesc, values, isnull); + copiedTuple = heap_form_tuple(newTupDesc, values, isnull); /* The heap rewrite module does the rest */ - rewrite_pg_tde_tuple(rwstate, tuple, copiedTuple); + rewrite_heap_tuple(rwstate, tuple, copiedTuple); - pg_tde_freetuple(copiedTuple); + heap_freetuple(copiedTuple); } /* @@ -2562,65 +2556,65 @@ SampleHeapTupleVisible(TableScanDesc scan, Buffer buffer, static const TableAmRoutine pg_tdeam_methods = { .type = T_TableAmRoutine, - .slot_callbacks = pg_tdeam_slot_callbacks, + .slot_callbacks = heapam_slot_callbacks, - .scan_begin = pg_tde_beginscan, - .scan_end = pg_tde_endscan, - .scan_rescan = pg_tde_rescan, - .scan_getnextslot = pg_tde_getnextslot, + .scan_begin = heap_beginscan, + .scan_end = heap_endscan, + .scan_rescan = heap_rescan, + .scan_getnextslot = heap_getnextslot, - .scan_set_tidrange = pg_tde_set_tidrange, - .scan_getnextslot_tidrange = 
pg_tde_getnextslot_tidrange, + .scan_set_tidrange = heap_set_tidrange, + .scan_getnextslot_tidrange = heap_getnextslot_tidrange, .parallelscan_estimate = table_block_parallelscan_estimate, .parallelscan_initialize = table_block_parallelscan_initialize, .parallelscan_reinitialize = table_block_parallelscan_reinitialize, - .index_fetch_begin = pg_tdeam_index_fetch_begin, - .index_fetch_reset = pg_tdeam_index_fetch_reset, - .index_fetch_end = pg_tdeam_index_fetch_end, - .index_fetch_tuple = pg_tdeam_index_fetch_tuple, - - .tuple_insert = pg_tdeam_tuple_insert, - .tuple_insert_speculative = pg_tdeam_tuple_insert_speculative, - .tuple_complete_speculative = pg_tdeam_tuple_complete_speculative, - .multi_insert = pg_tde_multi_insert, - .tuple_delete = pg_tdeam_tuple_delete, - .tuple_update = pg_tdeam_tuple_update, - .tuple_lock = pg_tdeam_tuple_lock, - - .tuple_fetch_row_version = pg_tdeam_fetch_row_version, - .tuple_get_latest_tid = pg_tde_get_latest_tid, - .tuple_tid_valid = pg_tdeam_tuple_tid_valid, - .tuple_satisfies_snapshot = pg_tdeam_tuple_satisfies_snapshot, - .index_delete_tuples = pg_tde_index_delete_tuples, - - .relation_set_new_filelocator = pg_tdeam_relation_set_new_filelocator, - .relation_nontransactional_truncate = pg_tdeam_relation_nontransactional_truncate, - .relation_copy_data = pg_tdeam_relation_copy_data, - .relation_copy_for_cluster = pg_tdeam_relation_copy_for_cluster, - .relation_vacuum = pg_tde_vacuum_rel, - .scan_analyze_next_block = pg_tdeam_scan_analyze_next_block, - .scan_analyze_next_tuple = pg_tdeam_scan_analyze_next_tuple, - .index_build_range_scan = pg_tdeam_index_build_range_scan, - .index_validate_scan = pg_tdeam_index_validate_scan, + .index_fetch_begin = heapam_index_fetch_begin, + .index_fetch_reset = heapam_index_fetch_reset, + .index_fetch_end = heapam_index_fetch_end, + .index_fetch_tuple = heapam_index_fetch_tuple, + + .tuple_insert = heapam_tuple_insert, + .tuple_insert_speculative = heapam_tuple_insert_speculative, + .tuple_complete_speculative = heapam_tuple_complete_speculative, + .multi_insert = heap_multi_insert, + .tuple_delete = heapam_tuple_delete, + .tuple_update = heapam_tuple_update, + .tuple_lock = heapam_tuple_lock, + + .tuple_fetch_row_version = heapam_fetch_row_version, + .tuple_get_latest_tid = heap_get_latest_tid, + .tuple_tid_valid = heapam_tuple_tid_valid, + .tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot, + .index_delete_tuples = heap_index_delete_tuples, + + .relation_set_new_filelocator = heapam_relation_set_new_filelocator, + .relation_nontransactional_truncate = heapam_relation_nontransactional_truncate, + .relation_copy_data = heapam_relation_copy_data, + .relation_copy_for_cluster = heapam_relation_copy_for_cluster, + .relation_vacuum = heap_vacuum_rel, + .scan_analyze_next_block = heapam_scan_analyze_next_block, + .scan_analyze_next_tuple = heapam_scan_analyze_next_tuple, + .index_build_range_scan = heapam_index_build_range_scan, + .index_validate_scan = heapam_index_validate_scan, .relation_size = table_block_relation_size, - .relation_needs_toast_table = pg_tdeam_relation_needs_toast_table, - .relation_toast_am = pg_tdeam_relation_toast_am, - .relation_fetch_toast_slice = pg_tde_fetch_toast_slice, + .relation_needs_toast_table = heapam_relation_needs_toast_table, + .relation_toast_am = heapam_relation_toast_am, + .relation_fetch_toast_slice = heap_fetch_toast_slice, - .relation_estimate_size = pg_tdeam_estimate_rel_size, + .relation_estimate_size = heapam_estimate_rel_size, - .scan_bitmap_next_block = 
pg_tdeam_scan_bitmap_next_block, - .scan_bitmap_next_tuple = pg_tdeam_scan_bitmap_next_tuple, - .scan_sample_next_block = pg_tdeam_scan_sample_next_block, - .scan_sample_next_tuple = pg_tdeam_scan_sample_next_tuple + .scan_bitmap_next_block = heapam_scan_bitmap_next_block, + .scan_bitmap_next_tuple = heapam_scan_bitmap_next_tuple, + .scan_sample_next_block = heapam_scan_sample_next_block, + .scan_sample_next_tuple = heapam_scan_sample_next_tuple }; const TableAmRoutine * -GetPGTdeamTableAmRoutine(void) +GetHeapamTableAmRoutine(void) { return &pg_tdeam_methods; } diff --git a/src/access/pg_tdeam_visibility.c b/src/access/heapam_visibility.c similarity index 99% rename from src/access/pg_tdeam_visibility.c rename to src/access/heapam_visibility.c index c037e30c..a7160013 100644 --- a/src/access/pg_tdeam_visibility.c +++ b/src/access/heapam_visibility.c @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * pg_tdeam_visibility.c + * heapam_visibility.c * Tuple visibility rules for tuples stored in heap. * * NOTE: all the HeapTupleSatisfies routines will update the tuple's @@ -59,17 +59,14 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * src/backend/access/heap/pg_tdeam_visibility.c + * src/backend/access/heap/heapam_visibility.c * *------------------------------------------------------------------------- */ -#include "pg_tde_defines.h" - #include "postgres.h" -#include "access/pg_tdeam.h" - +#include "access/heapam.h" #include "access/htup_details.h" #include "access/multixact.h" #include "access/subtrans.h" @@ -99,7 +96,7 @@ * future re-examination of the tuple. * * We can always set hint bits when marking a transaction aborted. (Some - * code in pg_tdeam.c relies on that!) + * code in heapam.c relies on that!) * * Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then * we can always set the hint bits, since pre-9.0 VACUUM FULL always used diff --git a/src/access/pg_tdetoast.c b/src/access/heaptoast.c similarity index 91% rename from src/access/pg_tdetoast.c rename to src/access/heaptoast.c index 117dc207..52ecd456 100644 --- a/src/access/pg_tdetoast.c +++ b/src/access/heaptoast.c @@ -12,37 +12,35 @@ * * * INTERFACE ROUTINES - * pg_tde_toast_insert_or_update - + * heap_toast_insert_or_update - * Try to make a given tuple fit into one page by compressing * or moving off attributes * - * pg_tde_toast_delete - + * heap_toast_delete - * Reclaim toast storage when a tuple is deleted * *------------------------------------------------------------------------- */ -#include "pg_tde_defines.h" #include "postgres.h" -#include "access/pg_tdeam.h" -#include "access/pg_tdetoast.h" - #include "access/detoast.h" #include "access/genam.h" +#include "access/heapam.h" +#include "access/heaptoast.h" #include "access/toast_helper.h" #include "access/toast_internals.h" #include "utils/fmgroids.h" /* ---------- - * pg_tde_toast_delete - + * heap_toast_delete - * * Cascaded delete toast-entries on DELETE * ---------- */ void -pg_tde_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) +heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) { TupleDesc tupleDesc; Datum toast_values[MaxHeapAttributeNumber]; @@ -58,10 +56,10 @@ pg_tde_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) /* * Get the tuple descriptor and break down the tuple into fields. 
* - * NOTE: it's debatable whether to use pg_tde_deform_tuple() here or just - * pg_tde_getattr() only the varlena columns. The latter could win if there + * NOTE: it's debatable whether to use heap_deform_tuple() here or just + * heap_getattr() only the varlena columns. The latter could win if there * are few varlena columns and many non-varlena ones. However, - * pg_tde_deform_tuple costs only O(N) while the pg_tde_getattr way would cost + * heap_deform_tuple costs only O(N) while the heap_getattr way would cost * O(N^2) if there are many varlena columns, so it seems better to err on * the side of linear cost. (We won't even be here unless there's at * least one varlena column, by the way.) @@ -69,7 +67,7 @@ pg_tde_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) tupleDesc = rel->rd_att; Assert(tupleDesc->natts <= MaxHeapAttributeNumber); - pg_tde_deform_tuple(oldtup, tupleDesc, toast_values, toast_isnull); + heap_deform_tuple(oldtup, tupleDesc, toast_values, toast_isnull); /* Do the real work. */ toast_delete_external(rel, toast_values, toast_isnull, is_speculative); @@ -77,7 +75,7 @@ pg_tde_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) /* ---------- - * pg_tde_toast_insert_or_update - + * heap_toast_insert_or_update - * * Delete no-longer-used toast-entries and create new ones to * make the new tuple fit on INSERT or UPDATE @@ -85,7 +83,7 @@ pg_tde_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) * Inputs: * newtup: the candidate new tuple to be inserted * oldtup: the old row version for UPDATE, or NULL for INSERT - * options: options to be passed to pg_tde_insert() for toast rows + * options: options to be passed to heap_insert() for toast rows * Result: * either newtup if no toasting is needed, or a palloc'd modified tuple * that is what should actually get stored @@ -95,7 +93,7 @@ pg_tde_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative) * ---------- */ HeapTuple -pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, +heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, int options) { HeapTuple result_tuple; @@ -134,9 +132,9 @@ pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, numAttrs = tupleDesc->natts; Assert(numAttrs <= MaxHeapAttributeNumber); - pg_tde_deform_tuple(newtup, tupleDesc, toast_values, toast_isnull); + heap_deform_tuple(newtup, tupleDesc, toast_values, toast_isnull); if (oldtup != NULL) - pg_tde_deform_tuple(oldtup, tupleDesc, toast_oldvalues, toast_oldisnull); + heap_deform_tuple(oldtup, tupleDesc, toast_oldvalues, toast_oldisnull); /* ---------- * Prepare for toasting @@ -170,7 +168,7 @@ pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * ---------- */ - /* compute header overhead --- this should match pg_tde_form_tuple() */ + /* compute header overhead --- this should match heap_form_tuple() */ hoff = SizeofHeapTupleHeader; if ((ttc.ttc_flags & TOAST_HAS_NULLS) != 0) hoff += BITMAPLEN(numAttrs); @@ -183,7 +181,7 @@ pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * large attributes with attstorage EXTENDED or EXTERNAL, and store them * external. */ - while (pg_tde_compute_data_size(tupleDesc, + while (heap_compute_data_size(tupleDesc, toast_values, toast_isnull) > maxDataLen) { int biggest_attno; @@ -224,7 +222,7 @@ pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * are still inline, and make them external. 
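The note above argues for deforming the whole tuple once rather than fetching columns one at a time: with varlena columns the attribute offsets cannot be cached, so each per-column fetch re-walks the preceding columns. A toy step-count comparison of the two strategies:

/* Toy cost model only: one "step" = examining one column while walking a tuple. */
#include <stdio.h>

int main(void)
{
    for (int natts = 4; natts <= 64; natts *= 4)
    {
        long deform_steps = natts;          /* a single pass over all columns */
        long getattr_steps = 0;

        for (int i = 0; i < natts; i++)
            getattr_steps += i + 1;         /* each fetch re-walks the prefix */

        printf("natts=%2d  deform=%4ld steps  repeated getattr=%5ld steps\n",
               natts, deform_steps, getattr_steps);
    }
    return 0;
}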
But skip this if there's no * toast table to push them to. */ - while (pg_tde_compute_data_size(tupleDesc, + while (heap_compute_data_size(tupleDesc, toast_values, toast_isnull) > maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid) { @@ -240,7 +238,7 @@ pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * Round 3 - this time we take attributes with storage MAIN into * compression */ - while (pg_tde_compute_data_size(tupleDesc, + while (heap_compute_data_size(tupleDesc, toast_values, toast_isnull) > maxDataLen) { int biggest_attno; @@ -259,7 +257,7 @@ pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, */ maxDataLen = TOAST_TUPLE_TARGET_MAIN - hoff; - while (pg_tde_compute_data_size(tupleDesc, + while (heap_compute_data_size(tupleDesc, toast_values, toast_isnull) > maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid) { @@ -298,7 +296,7 @@ pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, if ((ttc.ttc_flags & TOAST_HAS_NULLS) != 0) new_header_len += BITMAPLEN(numAttrs); new_header_len = MAXALIGN(new_header_len); - new_data_len = pg_tde_compute_data_size(tupleDesc, + new_data_len = heap_compute_data_size(tupleDesc, toast_values, toast_isnull); new_tuple_len = new_header_len + new_data_len; @@ -320,7 +318,7 @@ pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, new_data->t_hoff = new_header_len; /* Copy over the data, and fill the null bitmap if needed */ - pg_tde_fill_tuple(tupleDesc, + heap_fill_tuple(tupleDesc, toast_values, toast_isnull, (char *) new_data + new_header_len, @@ -362,7 +360,7 @@ toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc) * Break down the tuple into fields. */ Assert(numAttrs <= MaxTupleAttributeNumber); - pg_tde_deform_tuple(tup, tupleDesc, toast_values, toast_isnull); + heap_deform_tuple(tup, tupleDesc, toast_values, toast_isnull); memset(toast_free, 0, numAttrs * sizeof(bool)); @@ -388,7 +386,7 @@ toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc) /* * Form the reconfigured tuple. */ - new_tuple = pg_tde_form_tuple(tupleDesc, toast_values, toast_isnull); + new_tuple = heap_form_tuple(tupleDesc, toast_values, toast_isnull); /* * Be sure to copy the tuple's identity fields. We also make a point of @@ -444,7 +442,7 @@ toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc) * * On the other hand, in-line short-header varlena fields are left alone. * If we "untoasted" them here, they'd just get changed back to short-header - * format anyway within pg_tde_fill_tuple. + * format anyway within heap_fill_tuple. * ---------- */ Datum @@ -474,7 +472,7 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup, * Break down the tuple into fields. */ Assert(numAttrs <= MaxTupleAttributeNumber); - pg_tde_deform_tuple(&tmptup, tupleDesc, toast_values, toast_isnull); + heap_deform_tuple(&tmptup, tupleDesc, toast_values, toast_isnull); memset(toast_free, 0, numAttrs * sizeof(bool)); @@ -504,13 +502,13 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup, * Calculate the new size of the tuple. * * This should match the reconstruction code in - * pg_tde_toast_insert_or_update. + * heap_toast_insert_or_update. 
*/ new_header_len = SizeofHeapTupleHeader; if (has_nulls) new_header_len += BITMAPLEN(numAttrs); new_header_len = MAXALIGN(new_header_len); - new_data_len = pg_tde_compute_data_size(tupleDesc, + new_data_len = heap_compute_data_size(tupleDesc, toast_values, toast_isnull); new_tuple_len = new_header_len + new_data_len; @@ -529,7 +527,7 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup, HeapTupleHeaderSetTypMod(new_data, tupleDesc->tdtypmod); /* Copy over the data, and fill the null bitmap if needed */ - pg_tde_fill_tuple(tupleDesc, + heap_fill_tuple(tupleDesc, toast_values, toast_isnull, (char *) new_data + new_header_len, @@ -554,7 +552,7 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup, * Build a tuple containing no out-of-line toasted fields. * (This does not eliminate compressed or short-header datums.) * - * This is essentially just like pg_tde_form_tuple, except that it will + * This is essentially just like heap_form_tuple, except that it will * expand any external-data pointers beforehand. * * It's not very clear whether it would be preferable to decompress @@ -574,7 +572,7 @@ toast_build_flattened_tuple(TupleDesc tupleDesc, Pointer freeable_values[MaxTupleAttributeNumber]; /* - * We can pass the caller's isnull array directly to pg_tde_form_tuple, but + * We can pass the caller's isnull array directly to heap_form_tuple, but * we potentially need to modify the values array. */ Assert(numAttrs <= MaxTupleAttributeNumber); @@ -603,7 +601,7 @@ toast_build_flattened_tuple(TupleDesc tupleDesc, /* * Form the reconfigured tuple. */ - new_tuple = pg_tde_form_tuple(tupleDesc, new_values, isnull); + new_tuple = heap_form_tuple(tupleDesc, new_values, isnull); /* * Free allocated temp values @@ -625,7 +623,7 @@ toast_build_flattened_tuple(TupleDesc tupleDesc, * result is the varlena into which the results should be written. */ void -pg_tde_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, +heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, int32 sliceoffset, int32 slicelength, struct varlena *result) { @@ -722,7 +720,7 @@ pg_tde_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, } else if (VARATT_IS_SHORT(chunk)) { - /* could happen due to pg_tde_form_tuple doing its thing */ + /* could happen due to heap_form_tuple doing its thing */ chunksize = VARSIZE_SHORT(chunk) - VARHDRSZ_SHORT; chunkdata = VARDATA_SHORT(chunk); } diff --git a/src/access/pg_tde_io.c b/src/access/hio.c similarity index 94% rename from src/access/pg_tde_io.c rename to src/access/hio.c index b62dad71..7cf5c41e 100644 --- a/src/access/pg_tde_io.c +++ b/src/access/hio.c @@ -17,12 +17,11 @@ #include "postgres.h" -#include "access/pg_tdeam.h" -#include "access/pg_tde_io.h" -#include "access/pg_tde_visibilitymap.h" -#include "encryption/enc_tuple.h" - +#include "access/heapam.h" +#include "access/hio.h" #include "access/htup_details.h" +#include "access/visibilitymap.h" +#include "encryption/enc_tuple.h" #include "storage/bufmgr.h" #include "storage/freespace.h" #include "storage/lmgr.h" @@ -30,14 +29,14 @@ /* - * pg_tde_RelationPutHeapTuple - place tuple at specified page + * RelationPutHeapTuple - place tuple at specified page * * !!! EREPORT(ERROR) IS DISALLOWED HERE !!! Must PANIC on failure!!! * * Note - caller must hold BUFFER_LOCK_EXCLUSIVE on the buffer. 
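heap_fetch_toast_slice, renamed above, reassembles a value from fixed-size chunks, so fetching a byte range means working out which chunk sequence numbers overlap the range and how much of the first and last chunk to keep. The arithmetic, with an assumed chunk size:

/* Chunk-slicing arithmetic only; the chunk size is an assumption for the sketch. */
#include <stdio.h>

#define TOAST_CHUNK_SIZE 1996   /* assumed */

int main(void)
{
    long sliceoffset = 5000;    /* byte offset into the logical value */
    long slicelength = 3000;    /* bytes wanted */

    long startchunk  = sliceoffset / TOAST_CHUNK_SIZE;
    long endchunk    = (sliceoffset + slicelength - 1) / TOAST_CHUNK_SIZE;
    long startoffset = sliceoffset % TOAST_CHUNK_SIZE;                       /* skip in first chunk */
    long endoffset   = (sliceoffset + slicelength - 1) % TOAST_CHUNK_SIZE;   /* keep up to here in last */

    printf("need chunk_seq %ld..%ld\n", startchunk, endchunk);
    printf("first chunk: skip %ld bytes; last chunk: keep %ld bytes\n",
           startoffset, endoffset + 1);
    return 0;
}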
*/ void -pg_tde_RelationPutHeapTuple(Relation relation, +RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token) @@ -64,7 +63,7 @@ pg_tde_RelationPutHeapTuple(Relation relation, pageHeader = BufferGetPage(buffer); offnum = TDE_PageAddItem(tuple->t_tableOid, BufferGetBlockNumber(buffer), pageHeader, (Item) tuple->t_data, - tuple->t_len, InvalidOffsetNumber, false, true); + tuple->t_len, InvalidOffsetNumber, false, true); if (offnum == InvalidOffsetNumber) elog(PANIC, "failed to add tuple to page"); @@ -177,10 +176,10 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, { /* Figure out which pins we need but don't have. */ need_to_pin_buffer1 = PageIsAllVisible(BufferGetPage(buffer1)) - && !pg_tde_visibilitymap_pin_ok(block1, *vmbuffer1); + && !visibilitymap_pin_ok(block1, *vmbuffer1); need_to_pin_buffer2 = buffer2 != InvalidBuffer && PageIsAllVisible(BufferGetPage(buffer2)) - && !pg_tde_visibilitymap_pin_ok(block2, *vmbuffer2); + && !visibilitymap_pin_ok(block2, *vmbuffer2); if (!need_to_pin_buffer1 && !need_to_pin_buffer2) break; @@ -192,9 +191,9 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, /* Get pins. */ if (need_to_pin_buffer1) - pg_tde_visibilitymap_pin(relation, block1, vmbuffer1); + visibilitymap_pin(relation, block1, vmbuffer1); if (need_to_pin_buffer2) - pg_tde_visibilitymap_pin(relation, block2, vmbuffer2); + visibilitymap_pin(relation, block2, vmbuffer2); /* Relock buffers. */ LockBuffer(buffer1, BUFFER_LOCK_EXCLUSIVE); @@ -287,6 +286,24 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate, */ extend_by_pages += extend_by_pages * waitcount; + /* --- + * If we previously extended using the same bistate, it's very likely + * we'll extend some more. Try to extend by as many pages as + * before. This can be important for performance for several reasons, + * including: + * + * - It prevents mdzeroextend() switching between extending the + * relation in different ways, which is inefficient for some + * filesystems. + * + * - Contention is often intermittent. Even if we currently don't see + * other waiters (see above), extending by larger amounts can + * prevent future contention. + * --- + */ + if (bistate) + extend_by_pages = Max(extend_by_pages, bistate->already_extended_by); + /* * Can't extend by more than MAX_BUFFERS_TO_EXTEND_BY, we need to pin * them all concurrently. @@ -325,7 +342,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate, * [auto]vacuum trying to truncate later pages as REL_TRUNCATE_MINIMUM is * way larger. */ - first_block = ExtendBufferedRelBy(EB_REL(relation), MAIN_FORKNUM, + first_block = ExtendBufferedRelBy(BMR_REL(relation), MAIN_FORKNUM, bistate ? bistate->strategy : NULL, EB_LOCK_FIRST, extend_by_pages, @@ -413,6 +430,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate, /* maintain bistate->current_buf */ IncrBufferRefCount(buffer); bistate->current_buf = buffer; + bistate->already_extended_by += extend_by_pages; } return buffer; @@ -420,7 +438,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate, } /* - * pg_tde_RelationGetBufferForTuple + * RelationGetBufferForTuple * * Returns pinned and exclusive-locked buffer of a page in given relation * with free space >= given len. @@ -440,7 +458,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate, * to lock the same two buffers in opposite orders. 
To ensure that this * can't happen, we impose the rule that buffers of a relation must be * locked in increasing page number order. This is most conveniently done - * by having pg_tde_RelationGetBufferForTuple lock them both, with suitable care + * by having RelationGetBufferForTuple lock them both, with suitable care * for ordering. * * NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the @@ -485,7 +503,7 @@ RelationAddBlocks(Relation relation, BulkInsertState bistate, * before any (unlogged) changes are made in buffer pool. */ Buffer -pg_tde_RelationGetBufferForTuple(Relation relation, Size len, +RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other, @@ -604,14 +622,14 @@ pg_tde_RelationGetBufferForTuple(Relation relation, Size len, /* easy case */ buffer = ReadBufferBI(relation, targetBlock, RBM_NORMAL, bistate); if (PageIsAllVisible(BufferGetPage(buffer))) - pg_tde_visibilitymap_pin(relation, targetBlock, vmbuffer); + visibilitymap_pin(relation, targetBlock, vmbuffer); /* * If the page is empty, pin vmbuffer to set all_frozen bit later. */ if ((options & HEAP_INSERT_FROZEN) && (PageGetMaxOffsetNumber(BufferGetPage(buffer)) == 0)) - pg_tde_visibilitymap_pin(relation, targetBlock, vmbuffer); + visibilitymap_pin(relation, targetBlock, vmbuffer); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); } @@ -620,7 +638,7 @@ pg_tde_RelationGetBufferForTuple(Relation relation, Size len, /* also easy case */ buffer = otherBuffer; if (PageIsAllVisible(BufferGetPage(buffer))) - pg_tde_visibilitymap_pin(relation, targetBlock, vmbuffer); + visibilitymap_pin(relation, targetBlock, vmbuffer); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); } else if (otherBlock < targetBlock) @@ -628,7 +646,7 @@ pg_tde_RelationGetBufferForTuple(Relation relation, Size len, /* lock other buffer first */ buffer = ReadBuffer(relation, targetBlock); if (PageIsAllVisible(BufferGetPage(buffer))) - pg_tde_visibilitymap_pin(relation, targetBlock, vmbuffer); + visibilitymap_pin(relation, targetBlock, vmbuffer); LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); } @@ -637,7 +655,7 @@ pg_tde_RelationGetBufferForTuple(Relation relation, Size len, /* lock target buffer first */ buffer = ReadBuffer(relation, targetBlock); if (PageIsAllVisible(BufferGetPage(buffer))) - pg_tde_visibilitymap_pin(relation, targetBlock, vmbuffer); + visibilitymap_pin(relation, targetBlock, vmbuffer); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE); } @@ -765,12 +783,12 @@ pg_tde_RelationGetBufferForTuple(Relation relation, Size len, { Assert(PageGetMaxOffsetNumber(page) == 0); - if (!pg_tde_visibilitymap_pin_ok(targetBlock, *vmbuffer)) + if (!visibilitymap_pin_ok(targetBlock, *vmbuffer)) { if (!unlockedTargetBuffer) LockBuffer(buffer, BUFFER_LOCK_UNLOCK); unlockedTargetBuffer = true; - pg_tde_visibilitymap_pin(relation, targetBlock, vmbuffer); + visibilitymap_pin(relation, targetBlock, vmbuffer); } } diff --git a/src/access/pg_tde_prune.c b/src/access/pruneheap.c similarity index 97% rename from src/access/pg_tde_prune.c rename to src/access/pruneheap.c index 28f36663..47b9e209 100644 --- a/src/access/pg_tde_prune.c +++ b/src/access/pruneheap.c @@ -12,13 +12,10 @@ * *------------------------------------------------------------------------- */ -#include "pg_tde_defines.h" - #include "postgres.h" -#include "access/pg_tdeam.h" -#include "access/pg_tdeam_xlog.h" - +#include 
"access/heapam.h" +#include "access/heapam_xlog.h" #include "access/htup_details.h" #include "access/transam.h" #include "access/xlog.h" @@ -31,7 +28,7 @@ #include "utils/rel.h" #include "utils/snapmgr.h" -/* Working data for pg_tde_page_prune and subroutines */ +/* Working data for heap_page_prune and subroutines */ typedef struct { Relation rel; @@ -71,7 +68,7 @@ typedef struct /* * Tuple visibility is only computed once for each tuple, for correctness - * and efficiency reasons; see comment in pg_tde_page_prune() for details. + * and efficiency reasons; see comment in heap_page_prune() for details. * This is of type int8[], instead of HTSV_Result[], so we can use -1 to * indicate no visibility has been computed, e.g. for LP_DEAD items. * @@ -108,7 +105,7 @@ static void page_verify_redirects(Page page); * Caller must have pin on the buffer, and must *not* have a lock on it. */ void -pg_tde_page_prune_opt(Relation relation, Buffer buffer) +heap_page_prune_opt(Relation relation, Buffer buffer) { Page page = BufferGetPage(buffer); TransactionId prune_xid; @@ -208,7 +205,7 @@ pg_tde_page_prune_opt(Relation relation, Buffer buffer) int ndeleted, nnewlpdead; - ndeleted = pg_tde_page_prune(relation, buffer, vistest, limited_xmin, + ndeleted = heap_page_prune(relation, buffer, vistest, limited_xmin, limited_ts, &nnewlpdead, NULL); /* @@ -265,7 +262,7 @@ pg_tde_page_prune_opt(Relation relation, Buffer buffer) * Returns the number of tuples deleted from the page during this call. */ int -pg_tde_page_prune(Relation relation, Buffer buffer, +heap_page_prune(Relation relation, Buffer buffer, GlobalVisState *vistest, TransactionId old_snap_xmin, TimestampTz old_snap_ts, @@ -392,7 +389,7 @@ pg_tde_page_prune(Relation relation, Buffer buffer, * Apply the planned item changes, then repair page fragmentation, and * update the page's hint bit about whether it has free line pointers. */ - pg_tde_page_prune_execute(buffer, + heap_page_prune_execute(buffer, prstate.redirected, prstate.nredirected, prstate.nowdead, prstate.ndead, prstate.nowunused, prstate.nunused); @@ -417,7 +414,7 @@ pg_tde_page_prune(Relation relation, Buffer buffer, */ if (RelationNeedsWAL(relation)) { - xl_pg_tde_prune xlrec; + xl_heap_prune xlrec; XLogRecPtr recptr; xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(relation); @@ -492,7 +489,7 @@ pg_tde_page_prune(Relation relation, Buffer buffer, * * Due to its cost we also only want to call * TransactionIdLimitedForOldSnapshots() if necessary, i.e. we might not have - * done so in pg_tde_page_prune_opt() if pd_prune_xid was old enough. But we + * done so in heap_page_prune_opt() if pd_prune_xid was old enough. But we * still want to be able to remove rows that are too new to be removed * according to prstate->vistest, but that can be removed based on * old_snapshot_threshold. So we call TransactionIdLimitedForOldSnapshots() on @@ -843,7 +840,7 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate) { /* * We found a redirect item that doesn't point to a valid follow-on - * item. This can happen if the loop in pg_tde_page_prune caused us to + * item. This can happen if the loop in heap_page_prune caused us to * visit the dead successor of a redirect item before visiting the * redirect item. We can clean up by setting the redirect item to * DEAD state. @@ -907,12 +904,12 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum) /* - * Perform the actual page changes needed by pg_tde_page_prune. 
+ * Perform the actual page changes needed by heap_page_prune. * It is expected that the caller has a full cleanup lock on the * buffer. */ void -pg_tde_page_prune_execute(Buffer buffer, +heap_page_prune_execute(Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused) @@ -1056,11 +1053,11 @@ pg_tde_page_prune_execute(Buffer buffer, * One way that bugs related to HOT pruning show is redirect items pointing to * removed tuples. It's not trivial to reliably check that marking an item * unused will not orphan a redirect item during heap_prune_chain() / - * pg_tde_page_prune_execute(), so we additionally check the whole page after + * heap_page_prune_execute(), so we additionally check the whole page after * pruning. Without this check such bugs would typically only cause asserts * later, potentially well after the corruption has been introduced. * - * Also check comments in pg_tde_page_prune_execute()'s redirection loop. + * Also check comments in heap_page_prune_execute()'s redirection loop. */ static void page_verify_redirects(Page page) @@ -1111,7 +1108,7 @@ page_verify_redirects(Page page) * and reused by a completely unrelated tuple. */ void -pg_tde_get_root_tuples(Page page, OffsetNumber *root_offsets) +heap_get_root_tuples(Page page, OffsetNumber *root_offsets) { OffsetNumber offnum, maxoff; diff --git a/src/access/pg_tde_rewrite.c b/src/access/rewriteheap.c similarity index 94% rename from src/access/pg_tde_rewrite.c rename to src/access/rewriteheap.c index 19a15095..eb83f848 100644 --- a/src/access/pg_tde_rewrite.c +++ b/src/access/rewriteheap.c @@ -15,21 +15,21 @@ * * To use the facility: * - * begin_pg_tde_rewrite + * begin_heap_rewrite * while (fetch next tuple) * { * if (tuple is dead) - * rewrite_pg_tde_dead_tuple + * rewrite_heap_dead_tuple * else * { * // do any transformations here if required - * rewrite_pg_tde_tuple + * rewrite_heap_tuple * } * } - * end_pg_tde_rewrite + * end_heap_rewrite * * The contents of the new relation shouldn't be relied on until after - * end_pg_tde_rewrite is called. + * end_heap_rewrite is called. * * * IMPLEMENTATION @@ -83,9 +83,9 @@ * * We can't use the normal heap_insert function to insert into the new * heap, because heap_insert overwrites the visibility information. - * We use a special-purpose raw_pg_tde_insert function instead, which + * We use a special-purpose raw_heap_insert function instead, which * is optimized for bulk inserting a lot of tuples, knowing that we have - * exclusive access to the heap. raw_pg_tde_insert builds new pages in + * exclusive access to the heap. raw_heap_insert builds new pages in * local storage. When a page is full, or at the end of the process, * we insert it to WAL as a single record and then write it to disk * directly through smgr. 
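The rewriteheap.c header above already spells out the driver loop in pseudocode. Rendered as a compilable C skeleton: the begin/rewrite/end calls mirror the signatures visible in this diff, while the fetch and transform helpers are hypothetical stubs supplied by the caller (as CLUSTER does):

/* Skeleton only; it compiles as a translation unit but the extern helpers are
 * placeholders, not real PostgreSQL functions. */
#include <stddef.h>
#include <stdbool.h>

typedef struct RewriteStateData *RewriteState;
typedef struct HeapTupleData *HeapTuple;
typedef struct RelationData *Relation;
typedef unsigned int TransactionId;
typedef unsigned int MultiXactId;

/* Declarations matching the functions renamed in this diff. */
RewriteState begin_heap_rewrite(Relation old_heap, Relation new_heap,
                                TransactionId oldest_xmin,
                                TransactionId freeze_xid,
                                MultiXactId cutoff_multi);
void rewrite_heap_tuple(RewriteState state, HeapTuple old_tuple, HeapTuple new_tuple);
bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple);
void end_heap_rewrite(RewriteState state);

/* Hypothetical caller-side helpers standing in for the scan and transform. */
extern HeapTuple fetch_next_tuple(void);
extern bool tuple_is_dead(HeapTuple tup);
extern HeapTuple transform_tuple(HeapTuple tup);   /* must return temp storage */

void
rewrite_relation(Relation old_heap, Relation new_heap,
                 TransactionId oldest_xmin, TransactionId freeze_xid,
                 MultiXactId cutoff_multi)
{
    RewriteState state = begin_heap_rewrite(old_heap, new_heap, oldest_xmin,
                                            freeze_xid, cutoff_multi);
    HeapTuple tup;

    while ((tup = fetch_next_tuple()) != NULL)
    {
        if (tuple_is_dead(tup))
            rewrite_heap_dead_tuple(state, tup);
        else
            rewrite_heap_tuple(state, tup, transform_tuple(tup));
    }

    /* The new relation's contents are not valid until this returns. */
    end_heap_rewrite(state);
}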
Note, however, that any data sent to the new @@ -100,18 +100,18 @@ * *------------------------------------------------------------------------- */ + #include "pg_tde_defines.h" #include "postgres.h" #include -#include "access/pg_tdeam.h" -#include "access/pg_tdeam_xlog.h" -#include "access/pg_tdetoast.h" -#include "access/pg_tde_rewrite.h" +#include "access/heapam.h" +#include "access/heapam_xlog.h" +#include "access/heaptoast.h" +#include "access/rewriteheap.h" #include "encryption/enc_tuple.h" - #include "access/transam.h" #include "access/xact.h" #include "access/xloginsert.h" @@ -216,12 +216,12 @@ typedef struct RewriteMappingDataEntry /* prototypes for internal functions */ -static void raw_pg_tde_insert(RewriteState state, HeapTuple tup); +static void raw_heap_insert(RewriteState state, HeapTuple tup); /* internal logical remapping prototypes */ -static void logical_begin_pg_tde_rewrite(RewriteState state); -static void logical_rewrite_pg_tde_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple); -static void logical_end_pg_tde_rewrite(RewriteState state); +static void logical_begin_heap_rewrite(RewriteState state); +static void logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple); +static void logical_end_heap_rewrite(RewriteState state); /* @@ -237,7 +237,7 @@ static void logical_end_pg_tde_rewrite(RewriteState state); * to be used in subsequent calls to the other functions. */ RewriteState -begin_pg_tde_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin, +begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin, TransactionId freeze_xid, MultiXactId cutoff_multi) { RewriteState state; @@ -289,7 +289,7 @@ begin_pg_tde_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_ MemoryContextSwitchTo(old_cxt); - logical_begin_pg_tde_rewrite(state); + logical_begin_heap_rewrite(state); return state; } @@ -300,7 +300,7 @@ begin_pg_tde_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_ * state and any other resources are freed. */ void -end_pg_tde_rewrite(RewriteState state) +end_heap_rewrite(RewriteState state) { HASH_SEQ_STATUS seq_status; UnresolvedTup unresolved; @@ -314,7 +314,7 @@ end_pg_tde_rewrite(RewriteState state) while ((unresolved = hash_seq_search(&seq_status)) != NULL) { ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid); - raw_pg_tde_insert(state, unresolved->tuple); + raw_heap_insert(state, unresolved->tuple); } /* Write the last page, if any */ @@ -343,7 +343,7 @@ end_pg_tde_rewrite(RewriteState state) if (RelationNeedsWAL(state->rs_new_rel)) smgrimmedsync(RelationGetSmgr(state->rs_new_rel), MAIN_FORKNUM); - logical_end_pg_tde_rewrite(state); + logical_end_heap_rewrite(state); /* Deleting the context frees everything */ MemoryContextDelete(state->rs_cxt); @@ -356,12 +356,12 @@ end_pg_tde_rewrite(RewriteState state) * we "freeze" very-old tuples. Note that since we scribble on new_tuple, * it had better be temp storage not a pointer to the original tuple. 
* - * state opaque state as returned by begin_pg_tde_rewrite + * state opaque state as returned by begin_heap_rewrite * old_tuple original tuple in the old heap * new_tuple new, rewritten tuple to be inserted to new heap */ void -rewrite_pg_tde_tuple(RewriteState state, +rewrite_heap_tuple(RewriteState state, HeapTuple old_tuple, HeapTuple new_tuple) { MemoryContext old_cxt; @@ -391,7 +391,7 @@ rewrite_pg_tde_tuple(RewriteState state, * While we have our hands on the tuple, we may as well freeze any * eligible xmin or xmax, so that future VACUUM effort can be saved. */ - pg_tde_freeze_tuple(new_tuple->t_data, + heap_freeze_tuple(new_tuple->t_data, state->rs_old_rel->rd_rel->relfrozenxid, state->rs_old_rel->rd_rel->relminmxid, state->rs_freeze_xid, @@ -474,10 +474,10 @@ rewrite_pg_tde_tuple(RewriteState state, ItemPointerData new_tid; /* Insert the tuple and find out where it's put in new_heap */ - raw_pg_tde_insert(state, new_tuple); + raw_heap_insert(state, new_tuple); new_tid = new_tuple->t_self; - logical_rewrite_pg_tde_tuple(state, old_tid, new_tuple); + logical_rewrite_heap_tuple(state, old_tid, new_tuple); /* * If the tuple is the updated version of a row, and the prior version @@ -563,7 +563,7 @@ rewrite_pg_tde_tuple(RewriteState state, * is now known really dead and won't be written to the output. */ bool -rewrite_pg_tde_dead_tuple(RewriteState state, HeapTuple old_tuple) +rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple) { /* * If we have already seen an earlier tuple in the update chain that @@ -613,7 +613,7 @@ rewrite_pg_tde_dead_tuple(RewriteState state, HeapTuple old_tuple) * the inserted data only, not in the caller's copy). */ static void -raw_pg_tde_insert(RewriteState state, HeapTuple tup) +raw_heap_insert(RewriteState state, HeapTuple tup) { Page page = state->rs_buffer; Size pageFreeSpace, @@ -646,7 +646,7 @@ raw_pg_tde_insert(RewriteState state, HeapTuple tup) */ options |= HEAP_INSERT_NO_LOGICAL; - heaptup = pg_tde_toast_insert_or_update(state->rs_new_rel, tup, NULL, + heaptup = heap_toast_insert_or_update(state->rs_new_rel, tup, NULL, options); } else @@ -691,7 +691,7 @@ raw_pg_tde_insert(RewriteState state, HeapTuple tup) /* * Now write the page. We say skipFsync = true because there's no * need for smgr to schedule an fsync for this write; we'll do it - * ourselves in end_pg_tde_rewrite. + * ourselves in end_heap_rewrite. 
*/ PageSetChecksumInplace(page, state->rs_blockno); @@ -712,7 +712,7 @@ raw_pg_tde_insert(RewriteState state, HeapTuple tup) /* And now we can insert the tuple into the page */ newoff = TDE_PageAddItem(heaptup->t_tableOid, BufferGetBlockNumber(state->rs_buffer), page, (Item) heaptup->t_data, heaptup->t_len, - InvalidOffsetNumber, false, true); + InvalidOffsetNumber, false, true); if (newoff == InvalidOffsetNumber) elog(ERROR, "failed to add tuple"); @@ -731,8 +731,6 @@ raw_pg_tde_insert(RewriteState state, HeapTuple tup) newitemid = PageGetItemId(page, newoff); onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid); - // TODO: decrypt/encrypt - onpage_tup->t_ctid = tup->t_self; } @@ -745,7 +743,7 @@ raw_pg_tde_insert(RewriteState state, HeapTuple tup) * Logical rewrite support * * When doing logical decoding - which relies on using cmin/cmax of catalog - * tuples, via xl_pg_tde_new_cid records - heap rewrites have to log enough + * tuples, via xl_heap_new_cid records - heap rewrites have to log enough * information to allow the decoding backend to update its internal mapping * of (relfilelocator,ctid) => (cmin, cmax) to be correct for the rewritten heap. * @@ -759,7 +757,7 @@ raw_pg_tde_insert(RewriteState state, HeapTuple tup) * * For efficiency we don't immediately spill every single map mapping for a * row to disk but only do so in batches when we've collected several of them - * in memory or when end_pg_tde_rewrite() has been called. + * in memory or when end_heap_rewrite() has been called. * * Crash-Safety: This module diverts from the usual patterns of doing WAL * since it cannot rely on checkpoint flushing out all buffers and thus @@ -775,7 +773,7 @@ raw_pg_tde_insert(RewriteState state, HeapTuple tup) * CheckPointLogicalRewriteHeap()) has flushed the (partial) mapping file to * disk. That leaves the tail end that has not yet been flushed open to * corruption, which is solved by including the current offset in the - * xl_pg_tde_rewrite_mapping records and truncating the mapping file to it + * xl_heap_rewrite_mapping records and truncating the mapping file to it * during replay. Every time a rewrite is finished all generated mapping files * are synced to disk. * @@ -796,7 +794,7 @@ raw_pg_tde_insert(RewriteState state, HeapTuple tup) * any further action by the various logical rewrite functions. */ static void -logical_begin_pg_tde_rewrite(RewriteState state) +logical_begin_heap_rewrite(RewriteState state) { HASHCTL hash_ctl; TransactionId logical_xmin; @@ -844,7 +842,7 @@ logical_begin_pg_tde_rewrite(RewriteState state) * Flush all logical in-memory mappings to disk, but don't fsync them yet. */ static void -logical_pg_tde_rewrite_flush_mappings(RewriteState state) +logical_heap_rewrite_flush_mappings(RewriteState state) { HASH_SEQ_STATUS seq_status; RewriteMappingFile *src; @@ -864,7 +862,7 @@ logical_pg_tde_rewrite_flush_mappings(RewriteState state) { char *waldata; char *waldata_start; - xl_pg_tde_rewrite_mapping xlrec; + xl_heap_rewrite_mapping xlrec; Oid dboid; uint32 len; int written; @@ -939,10 +937,10 @@ logical_pg_tde_rewrite_flush_mappings(RewriteState state) } /* - * Logical remapping part of end_pg_tde_rewrite(). + * Logical remapping part of end_heap_rewrite(). 
*/ static void -logical_end_pg_tde_rewrite(RewriteState state) +logical_end_heap_rewrite(RewriteState state) { HASH_SEQ_STATUS seq_status; RewriteMappingFile *src; @@ -953,7 +951,7 @@ logical_end_pg_tde_rewrite(RewriteState state) /* writeout remaining in-memory entries */ if (state->rs_num_rewrite_mappings > 0) - logical_pg_tde_rewrite_flush_mappings(state); + logical_heap_rewrite_flush_mappings(state); /* Iterate over all mappings we have written and fsync the files. */ hash_seq_init(&seq_status, state->rs_logical_mappings); @@ -1028,15 +1026,15 @@ logical_rewrite_log_mapping(RewriteState state, TransactionId xid, * mapping files. */ if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ ) - logical_pg_tde_rewrite_flush_mappings(state); + logical_heap_rewrite_flush_mappings(state); } /* * Perform logical remapping for a tuple that's mapped from old_tid to - * new_tuple->t_self by rewrite_pg_tde_tuple() if necessary for the tuple. + * new_tuple->t_self by rewrite_heap_tuple() if necessary for the tuple. */ static void -logical_rewrite_pg_tde_tuple(RewriteState state, ItemPointerData old_tid, +logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple) { ItemPointerData new_tid = new_tuple->t_self; @@ -1110,15 +1108,15 @@ logical_rewrite_pg_tde_tuple(RewriteState state, ItemPointerData old_tid, * Replay XLOG_HEAP2_REWRITE records */ void -pg_tde_xlog_logical_rewrite(XLogReaderState *r) +heap_xlog_logical_rewrite(XLogReaderState *r) { char path[MAXPGPATH]; int fd; - xl_pg_tde_rewrite_mapping *xlrec; + xl_heap_rewrite_mapping *xlrec; uint32 len; char *data; - xlrec = (xl_pg_tde_rewrite_mapping *) XLogRecGetData(r); + xlrec = (xl_heap_rewrite_mapping *) XLogRecGetData(r); snprintf(path, MAXPGPATH, "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT, diff --git a/src/access/pg_tde_vacuumlazy.c b/src/access/vacuumlazy.c similarity index 96% rename from src/access/pg_tde_vacuumlazy.c rename to src/access/vacuumlazy.c index b40ec95d..4eb953f9 100644 --- a/src/access/pg_tde_vacuumlazy.c +++ b/src/access/vacuumlazy.c @@ -17,7 +17,7 @@ * This frees up the memory space dedicated to storing dead TIDs. * * In practice VACUUM will often complete its initial pass over the target - * pg_tde relation without ever running out of space to store TIDs. This means + * heap relation without ever running out of space to store TIDs. This means * that there only needs to be one call to lazy_vacuum, after the initial pass * completes. * @@ -26,26 +26,22 @@ * * * IDENTIFICATION - * src/backend/access/pg_tde/vacuumlazy.c + * src/backend/access/heap/vacuumlazy.c * *------------------------------------------------------------------------- */ -#include "pg_tde_defines.h" - #include "postgres.h" #include -#include "access/pg_tdeam.h" -#include "access/pg_tdeam_xlog.h" -#include "access/pg_tde_visibilitymap.h" -#include "encryption/enc_tuple.h" - #include "access/amapi.h" #include "access/genam.h" +#include "access/heapam.h" +#include "access/heapam_xlog.h" #include "access/htup_details.h" #include "access/multixact.h" #include "access/transam.h" +#include "access/visibilitymap.h" #include "access/xact.h" #include "access/xlog.h" #include "access/xloginsert.h" @@ -187,7 +183,7 @@ typedef struct LVRelState * dead_items stores TIDs whose index tuples are deleted by index * vacuuming. Each TID points to an LP_DEAD line pointer from a heap page * that has been processed by lazy_scan_prune. 
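For logical decoding, the rewrite records an old-TID to new-TID mapping per transaction and spills it to mapping files; as the comments above note, entries are buffered in memory and flushed in batches once roughly 1000 have accumulated, with end_heap_rewrite draining the remainder. A self-contained sketch of that threshold-based batching; the entry layout is invented:

/* Threshold-batched spilling, mirroring the queue-then-flush pattern above. */
#include <stdio.h>

#define FLUSH_THRESHOLD 1000

typedef struct { long old_tid, new_tid; } MapEntryStub;

static MapEntryStub pending[FLUSH_THRESHOLD];
static int npending;

static void flush_mappings(void)
{
    /* The real code WAL-logs each batch and appends it to a mapping file. */
    printf("flushing %d mapping entries\n", npending);
    npending = 0;
}

static void remember_mapping(long old_tid, long new_tid)
{
    pending[npending].old_tid = old_tid;
    pending[npending].new_tid = new_tid;
    npending++;
    if (npending >= FLUSH_THRESHOLD)
        flush_mappings();
}

int main(void)
{
    for (long i = 0; i < 2500; i++)
        remember_mapping(i, i + 100000);
    if (npending > 0)               /* end of rewrite: drain the remainder */
        flush_mappings();
    return 0;
}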
Also needed by - * lazy_vacuum_pg_tde_rel, which marks the same LP_DEAD line pointers as + * lazy_vacuum_heap_rel, which marks the same LP_DEAD line pointers as * LP_UNUSED during second heap pass. */ VacDeadItems *dead_items; /* TIDs whose index tuples we'll delete */ @@ -260,8 +256,8 @@ static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf, bool *hastup, bool *recordfreespace); static void lazy_vacuum(LVRelState *vacrel); static bool lazy_vacuum_all_indexes(LVRelState *vacrel); -static void lazy_vacuum_pg_tde_rel(LVRelState *vacrel); -static int lazy_vacuum_pg_tde_page(LVRelState *vacrel, BlockNumber blkno, +static void lazy_vacuum_heap_rel(LVRelState *vacrel); +static int lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int index, Buffer vmbuffer); static bool lazy_check_wraparound_failsafe(LVRelState *vacrel); static void lazy_cleanup_all_indexes(LVRelState *vacrel); @@ -280,7 +276,7 @@ static BlockNumber count_nondeletable_pages(LVRelState *vacrel, bool *lock_waiter_detected); static void dead_items_alloc(LVRelState *vacrel, int nworkers); static void dead_items_cleanup(LVRelState *vacrel); -static bool pg_tde_page_is_all_visible(LVRelState *vacrel, Buffer buf, +static bool heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen); static void update_relstats_all_indexes(LVRelState *vacrel); static void vacuum_error_callback(void *arg); @@ -293,7 +289,7 @@ static void restore_vacuum_error_info(LVRelState *vacrel, /* - * pg_tde_vacuum_rel() -- perform VACUUM for one heap relation + * heap_vacuum_rel() -- perform VACUUM for one heap relation * * This routine sets things up for and then calls lazy_scan_heap, where * almost all work actually takes place. Finalizes everything after call @@ -304,7 +300,7 @@ static void restore_vacuum_error_info(LVRelState *vacrel, * and locked the relation. */ void -pg_tde_vacuum_rel(Relation rel, VacuumParams *params, +heap_vacuum_rel(Relation rel, VacuumParams *params, BufferAccessStrategy bstrategy) { LVRelState *vacrel; @@ -452,8 +448,8 @@ pg_tde_vacuum_rel(Relation rel, VacuumParams *params, * as an upper bound on the XIDs stored in the pages we'll actually scan * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs). * - * Next acquire vistest, a related cutoff that's used in pg_tde_page_prune. - * We expect vistest will always make pg_tde_page_prune remove any deleted + * Next acquire vistest, a related cutoff that's used in heap_page_prune. + * We expect vistest will always make heap_page_prune remove any deleted * tuple whose xmax is < OldestXmin. lazy_scan_prune must never become * confused about whether a tuple should be frozen or removed. (In the * future we might want to teach lazy_scan_prune to recompute vistest from @@ -571,7 +567,7 @@ pg_tde_vacuum_rel(Relation rel, VacuumParams *params, * pg_class.relpages to */ new_rel_pages = vacrel->rel_pages; /* After possible rel truncation */ - pg_tde_visibilitymap_count(rel, &new_rel_allvisible, NULL); + visibilitymap_count(rel, &new_rel_allvisible, NULL); if (new_rel_allvisible > new_rel_pages) new_rel_allvisible = new_rel_pages; @@ -803,7 +799,7 @@ pg_tde_vacuum_rel(Relation rel, VacuumParams *params, * heap pages following pruning. Earlier initial pass over the heap will * have collected the TIDs whose index tuples need to be removed. 
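heap_vacuum_rel drives the classic two-pass structure: the scan pass collects the TIDs of LP_DEAD line pointers into dead_items, index vacuuming removes the matching index entries, and lazy_vacuum_heap_rel's second pass flips those same line pointers to LP_UNUSED. A compact sketch of that bookkeeping with simplified TIDs (the real code stores them in VacDeadItems):

/* Two-pass shape of lazy vacuum, with made-up TIDs for illustration. */
#include <stdio.h>

#define MAX_DEAD 64

typedef struct { unsigned block, offset; } TidStub;

static TidStub dead_items[MAX_DEAD];
static int num_dead;

static void first_pass_scan(void)
{
    /* Pretend pruning left these LP_DEAD stubs behind. */
    TidStub found[] = {{0, 2}, {0, 5}, {3, 1}};
    for (int i = 0; i < 3; i++)
        dead_items[num_dead++] = found[i];
}

static void second_pass_vacuum(void)
{
    for (int i = 0; i < num_dead; i++)
        printf("block %u offset %u: LP_DEAD -> LP_UNUSED\n",
               dead_items[i].block, dead_items[i].offset);
    num_dead = 0;       /* storage can be reused for the next batch */
}

int main(void)
{
    first_pass_scan();
    /* ... index vacuuming deletes the pointing index entries here ... */
    second_pass_vacuum();
    return 0;
}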
* - * Finally, invokes lazy_vacuum_pg_tde_rel to vacuum heap pages, which + * Finally, invokes lazy_vacuum_heap_rel to vacuum heap pages, which * largely consists of marking LP_DEAD items (from collected TID array) * as LP_UNUSED. This has to happen in a second, final pass over the * heap, to preserve a basic invariant that all index AMs rely on: no @@ -950,7 +946,7 @@ lazy_scan_heap(LVRelState *vacrel) * all-visible. In most cases this will be very cheap, because we'll * already have the correct page pinned anyway. */ - pg_tde_visibilitymap_pin(vacrel->rel, blkno, &vmbuffer); + visibilitymap_pin(vacrel->rel, blkno, &vmbuffer); /* * We need a buffer cleanup lock to prune HOT chains and defragment @@ -1045,7 +1041,7 @@ lazy_scan_heap(LVRelState *vacrel) { Size freespace; - lazy_vacuum_pg_tde_page(vacrel, blkno, buf, 0, vmbuffer); + lazy_vacuum_heap_page(vacrel, blkno, buf, 0, vmbuffer); /* Forget the LP_DEAD items that we just vacuumed */ dead_items->num_items = 0; @@ -1066,10 +1062,10 @@ lazy_scan_heap(LVRelState *vacrel) * Now perform FSM processing for blkno, and move on to next * page. * - * Our call to lazy_vacuum_pg_tde_page() will have considered if + * Our call to lazy_vacuum_heap_page() will have considered if * it's possible to set all_visible/all_frozen independently * of lazy_scan_prune(). Note that prunestate was invalidated - * by lazy_vacuum_pg_tde_page() call. + * by lazy_vacuum_heap_page() call. */ freespace = PageGetHeapFreeSpace(page); @@ -1079,7 +1075,7 @@ lazy_scan_heap(LVRelState *vacrel) } /* - * There was no call to lazy_vacuum_pg_tde_page() because pruning + * There was no call to lazy_vacuum_heap_page() because pruning * didn't encounter/create any LP_DEAD items that needed to be * vacuumed. Prune state has not been invalidated, so proceed * with prunestate-driven visibility map and FSM steps (just like @@ -1111,13 +1107,13 @@ lazy_scan_heap(LVRelState *vacrel) * NB: If the heap page is all-visible but the VM bit is not set, * we don't need to dirty the heap page. However, if checksums * are enabled, we do need to make sure that the heap page is - * dirtied before passing it to pg_tde_visibilitymap_set(), because it + * dirtied before passing it to visibilitymap_set(), because it * may be logged. Given that this situation should only happen in * rare cases after a crash, it is not worth optimizing. */ PageSetAllVisible(page); MarkBufferDirty(buf); - pg_tde_visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr, + visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr, vmbuffer, prunestate.visibility_cutoff_xid, flags); } @@ -1129,11 +1125,11 @@ lazy_scan_heap(LVRelState *vacrel) * with buffer lock before concluding that the VM is corrupt. 
*/ else if (all_visible_according_to_vm && !PageIsAllVisible(page) && - pg_tde_visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0) + visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0) { elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u", vacrel->relname, blkno); - pg_tde_visibilitymap_clear(vacrel->rel, blkno, vmbuffer, + visibilitymap_clear(vacrel->rel, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS); } @@ -1158,7 +1154,7 @@ lazy_scan_heap(LVRelState *vacrel) vacrel->relname, blkno); PageClearAllVisible(page); MarkBufferDirty(buf); - pg_tde_visibilitymap_clear(vacrel->rel, blkno, vmbuffer, + visibilitymap_clear(vacrel->rel, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS); } @@ -1190,7 +1186,7 @@ lazy_scan_heap(LVRelState *vacrel) * safe for REDO was logged when the page's tuples were frozen. */ Assert(!TransactionIdIsValid(prunestate.visibility_cutoff_xid)); - pg_tde_visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr, + visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr, vmbuffer, InvalidTransactionId, VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN); @@ -1203,14 +1199,14 @@ lazy_scan_heap(LVRelState *vacrel) if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming) { /* - * Wait until lazy_vacuum_pg_tde_rel() to save free space. This + * Wait until lazy_vacuum_heap_rel() to save free space. This * doesn't just save us some cycles; it also allows us to record - * any additional free space that lazy_vacuum_pg_tde_page() will + * any additional free space that lazy_vacuum_heap_page() will * make available in cases where it's possible to truncate the * page's line pointer array. * * Note: It's not in fact 100% certain that we really will call - * lazy_vacuum_pg_tde_rel() -- lazy_vacuum() might yet opt to skip + * lazy_vacuum_heap_rel() -- lazy_vacuum() might yet opt to skip * index vacuuming (and so must skip heap vacuuming). This is * deemed okay because it only happens in emergencies, or when * there is very little free space anyway. (Besides, we start @@ -1308,7 +1304,7 @@ lazy_scan_skip(LVRelState *vacrel, Buffer *vmbuffer, BlockNumber next_block, *next_unskippable_allvis = true; while (next_unskippable_block < rel_pages) { - uint8 mapbits = pg_tde_visibilitymap_get_status(vacrel->rel, + uint8 mapbits = visibilitymap_get_status(vacrel->rel, next_unskippable_block, vmbuffer); @@ -1427,7 +1423,7 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, * (which creates a number of empty pages at the tail end of the * relation), and then enters them into the FSM. * - * Note we do not enter the page into the pg_tde_visibilitymap. That has the + * Note we do not enter the page into the visibilitymap. That has the * downside that we repeatedly visit this page in subsequent vacuums, * but otherwise we'll never discover the space on a promoted standby. * The harm of repeated checking ought to normally not be too bad. The @@ -1499,7 +1495,7 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, log_newpage_buffer(buf, true); PageSetAllVisible(page); - pg_tde_visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr, + visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr, vmbuffer, InvalidTransactionId, VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN); END_CRIT_SECTION(); @@ -1520,16 +1516,16 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno, * * Caller must hold pin and buffer cleanup lock on the buffer. 
* - * Prior to PostgreSQL 14 there were very rare cases where pg_tde_page_prune() + * Prior to PostgreSQL 14 there were very rare cases where heap_page_prune() * was allowed to disagree with our HeapTupleSatisfiesVacuum() call about * whether or not a tuple should be considered DEAD. This happened when an - * inserting transaction concurrently aborted (after our pg_tde_page_prune() + * inserting transaction concurrently aborted (after our heap_page_prune() * call, before our HeapTupleSatisfiesVacuum() call). There was rather a lot * of complexity just so we could deal with tuples that were DEAD to VACUUM, * but nevertheless were left with storage after pruning. * * The approach we take now is to restart pruning when the race condition is - * detected. This allows pg_tde_page_prune() to prune the tuples inserted by + * detected. This allows heap_page_prune() to prune the tuples inserted by * the now-aborted transaction. This is a little crude, but it guarantees * that any items that make it into the dead_items array are simple LP_DEAD * line pointers, and that every remaining item with tuple storage is @@ -1563,7 +1559,7 @@ lazy_scan_prune(LVRelState *vacrel, /* * maxoff might be reduced following line pointer array truncation in - * pg_tde_page_prune. That's safe for us to ignore, since the reclaimed + * heap_page_prune. That's safe for us to ignore, since the reclaimed * space will continue to look like LP_UNUSED items below. */ maxoff = PageGetMaxOffsetNumber(page); @@ -1591,7 +1587,7 @@ lazy_scan_prune(LVRelState *vacrel, * lpdead_items's final value can be thought of as the number of tuples * that were deleted from indexes. */ - tuples_deleted = pg_tde_page_prune(rel, buf, vacrel->vistest, + tuples_deleted = heap_page_prune(rel, buf, vacrel->vistest, InvalidTransactionId, 0, &nnewlpdead, &vacrel->offnum); @@ -1658,8 +1654,8 @@ lazy_scan_prune(LVRelState *vacrel, /* * DEAD tuples are almost always pruned into LP_DEAD line pointers by - * pg_tde_page_prune(), but it's possible that the tuple state changed - * since pg_tde_page_prune() looked. Handle that here by restarting. + * heap_page_prune(), but it's possible that the tuple state changed + * since heap_page_prune() looked. Handle that here by restarting. * (See comments at the top of function for a full explanation.) */ res = HeapTupleSatisfiesVacuum(&tuple, vacrel->cutoffs.OldestXmin, @@ -1773,7 +1769,7 @@ lazy_scan_prune(LVRelState *vacrel, prunestate->hastup = true; /* page makes rel truncation unsafe */ /* Tuple with storage -- consider need to freeze */ - if (pg_tde_prepare_freeze_tuple(tuple.t_data, &vacrel->cutoffs, &pagefrz, + if (heap_prepare_freeze_tuple(tuple.t_data, &vacrel->cutoffs, &pagefrz, &frozen[tuples_frozen], &totally_frozen)) { /* Save prepared freeze plan for later */ @@ -1798,7 +1794,7 @@ lazy_scan_prune(LVRelState *vacrel, vacrel->offnum = InvalidOffsetNumber; /* - * Freeze the page when pg_tde_prepare_freeze_tuple indicates that at least + * Freeze the page when heap_prepare_freeze_tuple indicates that at least * one XID/MXID from before FreezeLimit/MultiXactCutoff is present. Also * freeze when pruning generated an FPI, if doing so means that we set the * page all-frozen afterwards (might not happen until final heap pass). 
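/*
 * A minimal sketch of the prepare/execute split described above, using the
 * prototypes from heapam.h. The page-level decision of whether to freeze
 * (and the enclosing critical section) is simplified away; buf, page, maxoff,
 * vacrel and snapshotConflictHorizon are taken from the surrounding
 * lazy_scan_prune() context.
 */
HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
HeapPageFreeze  pagefrz;
int             tuples_frozen = 0;
OffsetNumber    offnum;

pagefrz.freeze_required = false;    /* trackers also seeded from vacrel */

for (offnum = FirstOffsetNumber; offnum <= maxoff;
     offnum = OffsetNumberNext(offnum))
{
    ItemId      itemid = PageGetItemId(page, offnum);
    bool        totally_frozen;

    if (!ItemIdIsNormal(itemid))
        continue;

    /* Build a freeze plan for this tuple when one is needed */
    if (heap_prepare_freeze_tuple((HeapTupleHeader) PageGetItem(page, itemid),
                                  &vacrel->cutoffs, &pagefrz,
                                  &frozen[tuples_frozen], &totally_frozen))
        frozen[tuples_frozen++].offset = offnum;
}

if (pagefrz.freeze_required && tuples_frozen > 0)
{
    /* Execute every plan for the page as one atomic, WAL-logged action */
    heap_freeze_execute_prepared(vacrel->rel, buf, snapshotConflictHorizon,
                                 frozen, tuples_frozen);
}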
@@ -1856,7 +1852,7 @@ lazy_scan_prune(LVRelState *vacrel, } /* Execute all freeze plans for page as a single atomic action */ - pg_tde_freeze_execute_prepared(vacrel->rel, buf, + heap_freeze_execute_prepared(vacrel->rel, buf, snapshotConflictHorizon, frozen, tuples_frozen); } @@ -1874,11 +1870,11 @@ lazy_scan_prune(LVRelState *vacrel, } /* - * VACUUM will call pg_tde_page_is_all_visible() during the second pass over + * VACUUM will call heap_page_is_all_visible() during the second pass over * the heap to determine all_visible and all_frozen for the page -- this * is a specialized version of the logic from this function. Now that * we've finished pruning and freezing, make sure that we're in total - * agreement with pg_tde_page_is_all_visible() using an assertion. + * agreement with heap_page_is_all_visible() using an assertion. */ #ifdef USE_ASSERT_CHECKING /* Note that all_frozen value does not matter when !all_visible */ @@ -1887,7 +1883,7 @@ lazy_scan_prune(LVRelState *vacrel, TransactionId cutoff; bool all_frozen; - if (!pg_tde_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen)) + if (!heap_page_is_all_visible(vacrel, buf, &cutoff, &all_frozen)) Assert(false); Assert(!TransactionIdIsValid(cutoff) || @@ -2020,8 +2016,7 @@ lazy_scan_noprune(LVRelState *vacrel, *hastup = true; /* page prevents rel truncation */ tupleheader = (HeapTupleHeader) PageGetItem(page, itemid); - // TODO: decrypt - if (pg_tde_tuple_should_freeze(tupleheader, &vacrel->cutoffs, + if (heap_tuple_should_freeze(tupleheader, &vacrel->cutoffs, &NoFreezePageRelfrozenXid, &NoFreezePageRelminMxid)) { @@ -2286,7 +2281,7 @@ lazy_vacuum(LVRelState *vacrel) * We successfully completed a round of index vacuuming. Do related * heap vacuuming now. */ - lazy_vacuum_pg_tde_rel(vacrel); + lazy_vacuum_heap_rel(vacrel); } else { @@ -2376,7 +2371,7 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) /* * We delete all LP_DEAD items from the first heap pass in all indexes on * each call here (except calls where we choose to do the failsafe). This - * makes the next call to lazy_vacuum_pg_tde_rel() safe (except in the event + * makes the next call to lazy_vacuum_heap_rel() safe (except in the event * of the failsafe triggering, which prevents the next call from taking * place). */ @@ -2398,7 +2393,7 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) } /* - * lazy_vacuum_pg_tde_rel() -- second pass over the heap for two pass strategy + * lazy_vacuum_heap_rel() -- second pass over the heap for two pass strategy * * This routine marks LP_DEAD items in vacrel->dead_items array as LP_UNUSED. * Pages that never had lazy_scan_prune record LP_DEAD items are not visited @@ -2416,7 +2411,7 @@ lazy_vacuum_all_indexes(LVRelState *vacrel) * index entry removal in batches as large as possible. */ static void -lazy_vacuum_pg_tde_rel(LVRelState *vacrel) +lazy_vacuum_heap_rel(LVRelState *vacrel) { int index = 0; BlockNumber vacuumed_pages = 0; @@ -2453,13 +2448,13 @@ lazy_vacuum_pg_tde_rel(LVRelState *vacrel) * all-visible. In most cases this will be very cheap, because we'll * already have the correct page pinned anyway. 
*/ - pg_tde_visibilitymap_pin(vacrel->rel, blkno, &vmbuffer); + visibilitymap_pin(vacrel->rel, blkno, &vmbuffer); /* We need a non-cleanup exclusive lock to mark dead_items unused */ buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL, vacrel->bstrategy); LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); - index = lazy_vacuum_pg_tde_page(vacrel, blkno, buf, index, vmbuffer); + index = lazy_vacuum_heap_page(vacrel, blkno, buf, index, vmbuffer); /* Now that we've vacuumed the page, record its available space */ page = BufferGetPage(buf); @@ -2492,7 +2487,7 @@ lazy_vacuum_pg_tde_rel(LVRelState *vacrel) } /* - * lazy_vacuum_pg_tde_page() -- free page's LP_DEAD items listed in the + * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in the * vacrel->dead_items array. * * Caller must have an exclusive buffer lock on the buffer (though a full @@ -2504,7 +2499,7 @@ lazy_vacuum_pg_tde_rel(LVRelState *vacrel) * after all LP_DEAD items for the same page in the array. */ static int -lazy_vacuum_pg_tde_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, +lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, int index, Buffer vmbuffer) { VacDeadItems *dead_items = vacrel->dead_items; @@ -2556,7 +2551,7 @@ lazy_vacuum_pg_tde_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, /* XLOG stuff */ if (RelationNeedsWAL(vacrel->rel)) { - xl_pg_tde_vacuum xlrec; + xl_heap_vacuum xlrec; XLogRecPtr recptr; xlrec.nunused = nunused; @@ -2587,7 +2582,7 @@ lazy_vacuum_pg_tde_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, * emitted. */ Assert(!PageIsAllVisible(page)); - if (pg_tde_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid, + if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid, &all_frozen)) { uint8 flags = VISIBILITYMAP_ALL_VISIBLE; @@ -2599,7 +2594,7 @@ lazy_vacuum_pg_tde_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer, } PageSetAllVisible(page); - pg_tde_visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr, + visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr, vmbuffer, visibility_cutoff_xid, flags); } @@ -3240,7 +3235,7 @@ dead_items_cleanup(LVRelState *vacrel) * introducing new side-effects here. 
*/ static bool -pg_tde_page_is_all_visible(LVRelState *vacrel, Buffer buf, +heap_page_is_all_visible(LVRelState *vacrel, Buffer buf, TransactionId *visibility_cutoff_xid, bool *all_frozen) { @@ -3326,7 +3321,7 @@ pg_tde_page_is_all_visible(LVRelState *vacrel, Buffer buf, /* Check whether this tuple is already frozen or not */ if (all_visible && *all_frozen && - pg_tde_tuple_needs_eventual_freeze(tuple.t_data)) + heap_tuple_needs_eventual_freeze(tuple.t_data)) *all_frozen = false; } break; diff --git a/src/access/pg_tde_visibilitymap.c b/src/access/visibilitymap.c similarity index 88% rename from src/access/pg_tde_visibilitymap.c rename to src/access/visibilitymap.c index 390c7d60..2e18cd88 100644 --- a/src/access/pg_tde_visibilitymap.c +++ b/src/access/visibilitymap.c @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * pg_tde_visibilitymap.c + * visibilitymap.c * bitmap for tracking visibility of heap tuples * * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group @@ -8,16 +8,16 @@ * * * IDENTIFICATION - * src/backend/access/heap/pg_tde_visibilitymap.c + * src/backend/access/heap/visibilitymap.c * * INTERFACE ROUTINES - * pg_tde_visibilitymap_clear - clear bits for one page in the visibility map - * pg_tde_visibilitymap_pin - pin a map page for setting a bit - * pg_tde_visibilitymap_pin_ok - check whether correct map page is already pinned - * pg_tde_visibilitymap_set - set a bit in a previously pinned page - * pg_tde_visibilitymap_get_status - get status of bits - * pg_tde_visibilitymap_count - count number of bits set in visibility map - * pg_tde_visibilitymap_prepare_truncate - + * visibilitymap_clear - clear bits for one page in the visibility map + * visibilitymap_pin - pin a map page for setting a bit + * visibilitymap_pin_ok - check whether correct map page is already pinned + * visibilitymap_set - set a bit in a previously pinned page + * visibilitymap_get_status - get status of bits + * visibilitymap_count - count number of bits set in visibility map + * visibilitymap_prepare_truncate - * prepare for truncation of the visibility map * * NOTES @@ -84,13 +84,10 @@ * *------------------------------------------------------------------------- */ -#include "pg_tde_defines.h" - #include "postgres.h" -#include "access/pg_tdeam_xlog.h" -#include "access/pg_tde_visibilitymap.h" - +#include "access/heapam_xlog.h" +#include "access/visibilitymap.h" #include "access/xloginsert.h" #include "access/xlogutils.h" #include "miscadmin.h" @@ -133,14 +130,14 @@ static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks); /* - * pg_tde_visibilitymap_clear - clear specified bits for one page in visibility map + * visibilitymap_clear - clear specified bits for one page in visibility map * * You must pass a buffer containing the correct map page to this function. - * Call pg_tde_visibilitymap_pin first to pin the right one. This function doesn't do + * Call visibilitymap_pin first to pin the right one. This function doesn't do * any I/O. Returns true if any bits have been cleared and false otherwise. 
*/ bool -pg_tde_visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags) +visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags) { BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); int mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); @@ -158,7 +155,7 @@ pg_tde_visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint #endif if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock) - elog(ERROR, "wrong buffer passed to pg_tde_visibilitymap_clear"); + elog(ERROR, "wrong buffer passed to visibilitymap_clear"); LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE); map = PageGetContents(BufferGetPage(vmbuf)); @@ -177,23 +174,23 @@ pg_tde_visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint } /* - * pg_tde_visibilitymap_pin - pin a map page for setting a bit + * visibilitymap_pin - pin a map page for setting a bit * * Setting a bit in the visibility map is a two-phase operation. First, call - * pg_tde_visibilitymap_pin, to pin the visibility map page containing the bit for + * visibilitymap_pin, to pin the visibility map page containing the bit for * the heap page. Because that can require I/O to read the map page, you * shouldn't hold a lock on the heap page while doing that. Then, call - * pg_tde_visibilitymap_set to actually set the bit. + * visibilitymap_set to actually set the bit. * * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by - * an earlier call to pg_tde_visibilitymap_pin or pg_tde_visibilitymap_get_status on the same + * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same * relation. On return, *vmbuf is a valid buffer with the map page containing * the bit for heapBlk. * * If the page doesn't exist in the map file yet, it is extended. */ void -pg_tde_visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf) +visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf) { BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); @@ -209,15 +206,15 @@ pg_tde_visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf) } /* - * pg_tde_visibilitymap_pin_ok - do we already have the correct page pinned? + * visibilitymap_pin_ok - do we already have the correct page pinned? * * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by - * an earlier call to pg_tde_visibilitymap_pin or pg_tde_visibilitymap_get_status on the same + * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same * relation. The return value indicates whether the buffer covers the * given heapBlk. */ bool -pg_tde_visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf) +visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf) { BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); @@ -225,7 +222,7 @@ pg_tde_visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf) } /* - * pg_tde_visibilitymap_set - set bit(s) on a previously pinned page + * visibilitymap_set - set bit(s) on a previously pinned page * * recptr is the LSN of the XLOG record we're replaying, if we're in recovery, * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the @@ -242,11 +239,11 @@ pg_tde_visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf) * the heap buffer to the WAL chain to protect it from being torn. * * You must pass a buffer containing the correct map page to this function. - * Call pg_tde_visibilitymap_pin first to pin the right one. This function doesn't do + * Call visibilitymap_pin first to pin the right one. 
This function doesn't do * any I/O. */ void -pg_tde_visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, +visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags) { @@ -269,11 +266,11 @@ pg_tde_visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, /* Check that we have the right heap page pinned, if present */ if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk) - elog(ERROR, "wrong heap buffer passed to pg_tde_visibilitymap_set"); + elog(ERROR, "wrong heap buffer passed to visibilitymap_set"); /* Check that we have the right VM page pinned */ if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock) - elog(ERROR, "wrong VM buffer passed to pg_tde_visibilitymap_set"); + elog(ERROR, "wrong VM buffer passed to visibilitymap_set"); page = BufferGetPage(vmBuf); map = (uint8 *) PageGetContents(page); @@ -291,7 +288,7 @@ pg_tde_visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, if (XLogRecPtrIsInvalid(recptr)) { Assert(!InRecovery); - recptr = log_pg_tde_visible(rel, heapBuf, vmBuf, cutoff_xid, flags); + recptr = log_heap_visible(rel, heapBuf, vmBuf, cutoff_xid, flags); /* * If data checksums are enabled (or wal_log_hints=on), we @@ -319,13 +316,13 @@ pg_tde_visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, } /* - * pg_tde_visibilitymap_get_status - get status of bits + * visibilitymap_get_status - get status of bits * * Are all tuples on heapBlk visible to all or are marked frozen, according * to the visibility map? * * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an - * earlier call to pg_tde_visibilitymap_pin or pg_tde_visibilitymap_get_status on the same + * earlier call to visibilitymap_pin or visibilitymap_get_status on the same * relation. On return, *vmbuf is a valid buffer with the map page containing * the bit for heapBlk, or InvalidBuffer. The caller is responsible for * releasing *vmbuf after it's done testing and setting bits. @@ -338,7 +335,7 @@ pg_tde_visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, * all concurrency issues! */ uint8 -pg_tde_visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf) +visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf) { BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk); uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); @@ -379,14 +376,14 @@ pg_tde_visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf } /* - * pg_tde_visibilitymap_count - count number of bits set in visibility map + * visibilitymap_count - count number of bits set in visibility map * * Note: we ignore the possibility of race conditions when the table is being * extended concurrently with the call. New pages added to the table aren't * going to be marked all-visible or all-frozen, so they won't affect the result. */ void -pg_tde_visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen) +visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen) { BlockNumber mapBlock; BlockNumber nvisible = 0; @@ -442,7 +439,7 @@ pg_tde_visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber * } /* - * pg_tde_visibilitymap_prepare_truncate - + * visibilitymap_prepare_truncate - * prepare for truncation of the visibility map * * nheapblocks is the new size of the heap. 
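/*
 * A minimal sketch of the two-phase protocol described above, as a caller
 * would use it; rel, blkno, buf, page and visibility_cutoff_xid are assumed
 * to come from the caller's context.
 */
Buffer  vmbuffer = InvalidBuffer;

/* Phase 1: pin the right map page; may do I/O, so no heap buffer lock yet */
visibilitymap_pin(rel, blkno, &vmbuffer);

LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

if (!PageIsAllVisible(page))
{
    /*
     * Dirty the heap page before the call: visibilitymap_set() may WAL-log
     * it when checksums or wal_log_hints are enabled.
     */
    PageSetAllVisible(page);
    MarkBufferDirty(buf);

    /* Phase 2: set the bit; no I/O beyond the already-pinned VM page */
    visibilitymap_set(rel, blkno, buf, InvalidXLogRecPtr,
                      vmbuffer, visibility_cutoff_xid,
                      VISIBILITYMAP_ALL_VISIBLE);
}

LockBuffer(buf, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(vmbuffer);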
@@ -453,7 +450,7 @@ pg_tde_visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber * * to truncate the visibility map pages. */ BlockNumber -pg_tde_visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks) +visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks) { BlockNumber newnblocks; @@ -631,7 +628,7 @@ vm_extend(Relation rel, BlockNumber vm_nblocks) { Buffer buf; - buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL, + buf = ExtendBufferedRelTo(BMR_REL(rel), VISIBILITYMAP_FORKNUM, NULL, EB_CREATE_FORK_IF_NEEDED | EB_CLEAR_SIZE_CACHE, vm_nblocks, diff --git a/src/include/access/pg_tdeam.h b/src/include/access/heapam.h similarity index 77% rename from src/include/access/pg_tdeam.h rename to src/include/access/heapam.h index 46047270..faf50265 100644 --- a/src/include/access/pg_tdeam.h +++ b/src/include/access/heapam.h @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * pg_tdeam.h + * heapam.h * POSTGRES heap access method definitions. * * @@ -11,8 +11,8 @@ * *------------------------------------------------------------------------- */ -#ifndef PG_TDEAM_H -#define PG_TDEAM_H +#ifndef HEAPAM_H +#define HEAPAM_H #include "access/relation.h" /* for backward compatibility */ #include "access/relscan.h" @@ -30,7 +30,7 @@ #include "utils/snapshot.h" -/* "options" flag bits for pg_tde_insert */ +/* "options" flag bits for heap_insert */ #define HEAP_INSERT_SKIP_FSM TABLE_INSERT_SKIP_FSM #define HEAP_INSERT_FROZEN TABLE_INSERT_FROZEN #define HEAP_INSERT_NO_LOGICAL TABLE_INSERT_NO_LOGICAL @@ -101,13 +101,13 @@ typedef enum } HTSV_Result; /* - * pg_tde_prepare_freeze_tuple may request that pg_tde_freeze_execute_prepared + * heap_prepare_freeze_tuple may request that heap_freeze_execute_prepared * check any tuple's to-be-frozen xmin and/or xmax status using pg_xact */ #define HEAP_FREEZE_CHECK_XMIN_COMMITTED 0x01 #define HEAP_FREEZE_CHECK_XMAX_ABORTED 0x02 -/* pg_tde_prepare_freeze_tuple state describing how to freeze a tuple */ +/* heap_prepare_freeze_tuple state describing how to freeze a tuple */ typedef struct HeapTupleFreeze { /* Fields describing how to process tuple */ @@ -126,7 +126,7 @@ typedef struct HeapTupleFreeze * State used by VACUUM to track the details of freezing all eligible tuples * on a given heap page. * - * VACUUM prepares freeze plans for each page via pg_tde_prepare_freeze_tuple + * VACUUM prepares freeze plans for each page via heap_prepare_freeze_tuple * calls (every tuple with storage gets its own call). This page-level freeze * state is updated across each call, which ultimately determines whether or * not freezing the page is required. @@ -134,7 +134,7 @@ typedef struct HeapTupleFreeze * Aside from the basic question of whether or not freezing will go ahead, the * state also tracks the oldest extant XID/MXID in the table as a whole, for * the purposes of advancing relfrozenxid/relminmxid values in pg_class later - * on. Each pg_tde_prepare_freeze_tuple call pushes NewRelfrozenXid and/or + * on. Each heap_prepare_freeze_tuple call pushes NewRelfrozenXid and/or * NewRelminMxid back as required to avoid unsafe final pg_class values. Any * and all unfrozen XIDs or MXIDs that remain after VACUUM finishes _must_ * have values >= the final relfrozenxid/relminmxid values in pg_class. This @@ -148,17 +148,17 @@ typedef struct HeapTupleFreeze */ typedef struct HeapPageFreeze { - /* Is pg_tde_prepare_freeze_tuple caller required to freeze page? 
*/ + /* Is heap_prepare_freeze_tuple caller required to freeze page? */ bool freeze_required; /* * "Freeze" NewRelfrozenXid/NewRelminMxid trackers. * - * Trackers used when pg_tde_freeze_execute_prepared freezes, or when there + * Trackers used when heap_freeze_execute_prepared freezes, or when there * are zero freeze plans for a page. It is always valid for vacuumlazy.c * to freeze any page, by definition. This even includes pages that have * no tuples with storage to consider in the first place. That way the - * 'totally_frozen' results from pg_tde_prepare_freeze_tuple can always be + * 'totally_frozen' results from heap_prepare_freeze_tuple can always be * used in the same way, even when no freeze plans need to be executed to * "freeze the page". Only the "freeze" path needs to consider the need * to set pages all-frozen in the visibility map under this scheme. @@ -170,7 +170,7 @@ typedef struct HeapPageFreeze * ratchet back the top-level NewRelfrozenXid/NewRelminMxid trackers? * * It is useful to use a definition of "freeze the page" that does not - * overspecify how MultiXacts are affected. pg_tde_prepare_freeze_tuple + * overspecify how MultiXacts are affected. heap_prepare_freeze_tuple * generally prefers to remove Multis eagerly, but lazy processing is used * in cases where laziness allows VACUUM to avoid allocating a new Multi. * The "freeze the page" trackers enable this flexibility. @@ -194,7 +194,7 @@ typedef struct HeapPageFreeze /* ---------------- * function prototypes for heap access method * - * pg_tde_create, pg_tde_create_with_catalog, and pg_tde_drop_with_catalog + * heap_create, heap_create_with_catalog, and heap_drop_with_catalog * are declared in catalog/heap.h * ---------------- */ @@ -206,99 +206,99 @@ typedef struct HeapPageFreeze */ #define HeapScanIsValid(scan) PointerIsValid(scan) -extern TableScanDesc pg_tde_beginscan(Relation relation, Snapshot snapshot, +extern TableScanDesc heap_beginscan(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags); -extern void pg_tde_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, +extern void heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks); -extern void pg_tde_getpage(TableScanDesc sscan, BlockNumber block); -extern void pg_tde_rescan(TableScanDesc sscan, ScanKey key, bool set_params, +extern void heapgetpage(TableScanDesc sscan, BlockNumber block); +extern void heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode); -extern void pg_tde_endscan(TableScanDesc sscan); -extern HeapTuple pg_tde_getnext(TableScanDesc sscan, ScanDirection direction); -extern bool pg_tde_getnextslot(TableScanDesc sscan, +extern void heap_endscan(TableScanDesc sscan); +extern HeapTuple heap_getnext(TableScanDesc sscan, ScanDirection direction); +extern bool heap_getnextslot(TableScanDesc sscan, ScanDirection direction, struct TupleTableSlot *slot); -extern void pg_tde_set_tidrange(TableScanDesc sscan, ItemPointer mintid, +extern void heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid, ItemPointer maxtid); -extern bool pg_tde_getnextslot_tidrange(TableScanDesc sscan, +extern bool heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot); -extern bool pg_tde_fetch(Relation relation, Snapshot snapshot, +extern bool heap_fetch(Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf); -extern bool 
pg_tde_hot_search_buffer(ItemPointer tid, Relation relation, +extern bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call); -extern void pg_tde_get_latest_tid(TableScanDesc sscan, ItemPointer tid); +extern void heap_get_latest_tid(TableScanDesc sscan, ItemPointer tid); extern BulkInsertState GetBulkInsertState(void); extern void FreeBulkInsertState(BulkInsertState); extern void ReleaseBulkInsertStatePin(BulkInsertState bistate); -extern void pg_tde_insert(Relation relation, HeapTuple tup, CommandId cid, +extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate); -extern void pg_tde_multi_insert(Relation relation, struct TupleTableSlot **slots, +extern void heap_multi_insert(Relation relation, struct TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate); -extern TM_Result pg_tde_delete(Relation relation, ItemPointer tid, +extern TM_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, struct TM_FailureData *tmfd, bool changingPart); -extern void pg_tde_finish_speculative(Relation relation, ItemPointer tid); -extern void pg_tde_abort_speculative(Relation relation, ItemPointer tid); -extern TM_Result pg_tde_update(Relation relation, ItemPointer otid, +extern void heap_finish_speculative(Relation relation, ItemPointer tid); +extern void heap_abort_speculative(Relation relation, ItemPointer tid); +extern TM_Result heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, struct TM_FailureData *tmfd, LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes); -extern TM_Result pg_tde_lock_tuple(Relation relation, HeapTuple tuple, +extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, struct TM_FailureData *tmfd); -extern void pg_tde_inplace_update(Relation relation, HeapTuple tuple); -extern bool pg_tde_prepare_freeze_tuple(HeapTupleHeader tuple, +extern void heap_inplace_update(Relation relation, HeapTuple tuple); +extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, HeapPageFreeze *pagefrz, HeapTupleFreeze *frz, bool *totally_frozen); -extern void pg_tde_freeze_execute_prepared(Relation rel, Buffer buffer, +extern void heap_freeze_execute_prepared(Relation rel, Buffer buffer, TransactionId snapshotConflictHorizon, HeapTupleFreeze *tuples, int ntuples); -extern bool pg_tde_freeze_tuple(HeapTupleHeader tuple, +extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId FreezeLimit, TransactionId MultiXactCutoff); -extern bool pg_tde_tuple_should_freeze(HeapTupleHeader tuple, +extern bool heap_tuple_should_freeze(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, TransactionId *NoFreezePageRelfrozenXid, MultiXactId *NoFreezePageRelminMxid); -extern bool pg_tde_tuple_needs_eventual_freeze(HeapTupleHeader tuple); +extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple); -extern void simple_pg_tde_insert(Relation relation, HeapTuple tup); -extern void simple_pg_tde_delete(Relation relation, ItemPointer tid); -extern void simple_pg_tde_update(Relation relation, ItemPointer otid, +extern void simple_heap_insert(Relation relation, HeapTuple tup); +extern void 
simple_heap_delete(Relation relation, ItemPointer tid); +extern void simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup, TU_UpdateIndexes *update_indexes); -extern TransactionId pg_tde_index_delete_tuples(Relation rel, +extern TransactionId heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate); /* in heap/pruneheap.c */ struct GlobalVisState; -extern void pg_tde_page_prune_opt(Relation relation, Buffer buffer); -extern int pg_tde_page_prune(Relation relation, Buffer buffer, +extern void heap_page_prune_opt(Relation relation, Buffer buffer); +extern int heap_page_prune(Relation relation, Buffer buffer, struct GlobalVisState *vistest, TransactionId old_snap_xmin, TimestampTz old_snap_ts, int *nnewlpdead, OffsetNumber *off_loc); -extern void pg_tde_page_prune_execute(Buffer buffer, +extern void heap_page_prune_execute(Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused); -extern void pg_tde_get_root_tuples(Page page, OffsetNumber *root_offsets); +extern void heap_get_root_tuples(Page page, OffsetNumber *root_offsets); /* in heap/vacuumlazy.c */ struct VacuumParams; -extern void pg_tde_vacuum_rel(Relation rel, +extern void heap_vacuum_rel(Relation rel, struct VacuumParams *params, BufferAccessStrategy bstrategy); /* in heap/heapam_visibility.c */ @@ -329,4 +329,4 @@ extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data, extern void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot); -#endif /* PG_TDEAM_H */ +#endif /* HEAPAM_H */ diff --git a/src/include/access/pg_tdeam_xlog.h b/src/include/access/heapam_xlog.h similarity index 72% rename from src/include/access/pg_tdeam_xlog.h rename to src/include/access/heapam_xlog.h index e4c80e40..a0384507 100644 --- a/src/include/access/pg_tdeam_xlog.h +++ b/src/include/access/heapam_xlog.h @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * - * pg_tdeam_xlog.h - * POSTGRES pg_tde access XLOG definitions. + * heapam_xlog.h + * POSTGRES heap access XLOG definitions. * * * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group @@ -11,8 +11,8 @@ * *------------------------------------------------------------------------- */ -#ifndef PG_TDEAM_XLOG_H -#define PG_TDEAM_XLOG_H +#ifndef HEAPAM_XLOG_H +#define HEAPAM_XLOG_H #include "access/htup.h" #include "access/xlogreader.h" @@ -24,7 +24,7 @@ /* - * WAL record definitions for pg_tdeam.c's WAL operations + * WAL record definitions for heapam.c's WAL operations * * XLOG allows to store some information in high 4 bits of log * record xl_info field. We use 3 for opcode and one for init bit. @@ -45,7 +45,7 @@ */ #define XLOG_HEAP_INIT_PAGE 0x80 /* - * We ran out of opcodes, so pg_tdeam.c now has a second RmgrId. These opcodes + * We ran out of opcodes, so heapam.c now has a second RmgrId. These opcodes * are associated with RM_HEAP2_ID, but are not logically different from * the ones above associated with RM_HEAP_ID. XLOG_HEAP_OPMASK applies to * these, too. @@ -60,7 +60,7 @@ #define XLOG_HEAP2_NEW_CID 0x70 /* - * xl_pg_tde_insert/xl_pg_tde_multi_insert flag values, 8 bits are available. + * xl_heap_insert/xl_heap_multi_insert flag values, 8 bits are available. */ /* PD_ALL_VISIBLE was cleared */ #define XLH_INSERT_ALL_VISIBLE_CLEARED (1<<0) @@ -73,7 +73,7 @@ #define XLH_INSERT_ALL_FROZEN_SET (1<<5) /* - * xl_pg_tde_update flag values, 8 bits are available. 
+ * xl_heap_update flag values, 8 bits are available. */ /* PD_ALL_VISIBLE was cleared */ #define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED (1<<0) @@ -90,7 +90,7 @@ (XLH_UPDATE_CONTAINS_OLD_TUPLE | XLH_UPDATE_CONTAINS_OLD_KEY) /* - * xl_pg_tde_delete flag values, 8 bits are available. + * xl_heap_delete flag values, 8 bits are available. */ /* PD_ALL_VISIBLE was cleared */ #define XLH_DELETE_ALL_VISIBLE_CLEARED (1<<0) @@ -104,18 +104,18 @@ (XLH_DELETE_CONTAINS_OLD_TUPLE | XLH_DELETE_CONTAINS_OLD_KEY) /* This is what we need to know about delete */ -typedef struct xl_pg_tde_delete +typedef struct xl_heap_delete { TransactionId xmax; /* xmax of the deleted tuple */ OffsetNumber offnum; /* deleted tuple's offset */ uint8 infobits_set; /* infomask bits */ uint8 flags; -} xl_pg_tde_delete; +} xl_heap_delete; -#define SizeOfHeapDelete (offsetof(xl_pg_tde_delete, flags) + sizeof(uint8)) +#define SizeOfHeapDelete (offsetof(xl_heap_delete, flags) + sizeof(uint8)) /* - * xl_pg_tde_truncate flag values, 8 bits are available. + * xl_heap_truncate flag values, 8 bits are available. */ #define XLH_TRUNCATE_CASCADE (1<<0) #define XLH_TRUNCATE_RESTART_SEQS (1<<1) @@ -125,15 +125,15 @@ typedef struct xl_pg_tde_delete * sequence relids that need to be restarted, if any. * All rels are always within the same database, so we just list dbid once. */ -typedef struct xl_pg_tde_truncate +typedef struct xl_heap_truncate { Oid dbId; uint32 nrelids; uint8 flags; Oid relids[FLEXIBLE_ARRAY_MEMBER]; -} xl_pg_tde_truncate; +} xl_heap_truncate; -#define SizeOfHeapTruncate (offsetof(xl_pg_tde_truncate, relids)) +#define SizeOfHeapTruncate (offsetof(xl_heap_truncate, relids)) /* * We don't store the whole fixed part (HeapTupleHeaderData) of an inserted @@ -141,30 +141,30 @@ typedef struct xl_pg_tde_truncate * fields that are available elsewhere in the WAL record, or perhaps just * plain needn't be reconstructed. These are the fields we must store. */ -typedef struct xl_pg_tde_header +typedef struct xl_heap_header { uint16 t_infomask2; uint16 t_infomask; uint8 t_hoff; -} xl_pg_tde_header; +} xl_heap_header; -#define SizeOfHeapHeader (offsetof(xl_pg_tde_header, t_hoff) + sizeof(uint8)) +#define SizeOfHeapHeader (offsetof(xl_heap_header, t_hoff) + sizeof(uint8)) /* This is what we need to know about insert */ -typedef struct xl_pg_tde_insert +typedef struct xl_heap_insert { OffsetNumber offnum; /* inserted tuple's offset */ uint8 flags; - /* xl_pg_tde_header & TUPLE DATA in backup block 0 */ -} xl_pg_tde_insert; + /* xl_heap_header & TUPLE DATA in backup block 0 */ +} xl_heap_insert; -#define SizeOfHeapInsert (offsetof(xl_pg_tde_insert, flags) + sizeof(uint8)) +#define SizeOfHeapInsert (offsetof(xl_heap_insert, flags) + sizeof(uint8)) /* * This is what we need to know about a multi-insert. * - * The main data of the record consists of this xl_pg_tde_multi_insert header. + * The main data of the record consists of this xl_heap_multi_insert header. * 'offsets' array is omitted if the whole page is reinitialized * (XLOG_HEAP_INIT_PAGE). * @@ -172,14 +172,14 @@ typedef struct xl_pg_tde_insert * followed by the tuple data for each tuple. There is padding to align * each xl_multi_insert_tuple struct. 
*/ -typedef struct xl_pg_tde_multi_insert +typedef struct xl_heap_multi_insert { uint8 flags; uint16 ntuples; OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]; -} xl_pg_tde_multi_insert; +} xl_heap_multi_insert; -#define SizeOfHeapMultiInsert offsetof(xl_pg_tde_multi_insert, offsets) +#define SizeOfHeapMultiInsert offsetof(xl_heap_multi_insert, offsets) typedef struct xl_multi_insert_tuple { @@ -200,7 +200,7 @@ typedef struct xl_multi_insert_tuple * If XLH_UPDATE_PREFIX_FROM_OLD or XLH_UPDATE_SUFFIX_FROM_OLD flags are set, * the prefix and/or suffix come first, as one or two uint16s. * - * After that, xl_pg_tde_header and new tuple data follow. The new tuple + * After that, xl_heap_header and new tuple data follow. The new tuple * data doesn't include the prefix and suffix, which are copied from the * old tuple on replay. * @@ -209,7 +209,7 @@ typedef struct xl_multi_insert_tuple * * Backup blk 1: old page, if different. (no data, just a reference to the blk) */ -typedef struct xl_pg_tde_update +typedef struct xl_heap_update { TransactionId old_xmax; /* xmax of the old tuple */ OffsetNumber old_offnum; /* old tuple's offset */ @@ -220,11 +220,11 @@ typedef struct xl_pg_tde_update /* * If XLH_UPDATE_CONTAINS_OLD_TUPLE or XLH_UPDATE_CONTAINS_OLD_KEY flags - * are set, xl_pg_tde_header and tuple data for the old tuple follow. + * are set, xl_heap_header and tuple data for the old tuple follow. */ -} xl_pg_tde_update; +} xl_heap_update; -#define SizeOfHeapUpdate (offsetof(xl_pg_tde_update, new_offnum) + sizeof(OffsetNumber)) +#define SizeOfHeapUpdate (offsetof(xl_heap_update, new_offnum) + sizeof(OffsetNumber)) /* * This is what we need to know about page pruning (both during VACUUM and @@ -240,7 +240,7 @@ typedef struct xl_pg_tde_update * * Acquires a full cleanup lock. */ -typedef struct xl_pg_tde_prune +typedef struct xl_heap_prune { TransactionId snapshotConflictHorizon; uint16 nredirected; @@ -248,23 +248,23 @@ typedef struct xl_pg_tde_prune bool isCatalogRel; /* to handle recovery conflict during logical * decoding on standby */ /* OFFSET NUMBERS are in the block reference 0 */ -} xl_pg_tde_prune; +} xl_heap_prune; -#define SizeOfHeapPrune (offsetof(xl_pg_tde_prune, isCatalogRel) + sizeof(bool)) +#define SizeOfHeapPrune (offsetof(xl_heap_prune, isCatalogRel) + sizeof(bool)) /* * The vacuum page record is similar to the prune record, but can only mark - * already LP_DEAD items LP_UNUSED (during VACUUM's second pg_tde pass) + * already LP_DEAD items LP_UNUSED (during VACUUM's second heap pass) * * Acquires an ordinary exclusive lock only. 
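/*
 * A minimal sketch of how the vacuum record is emitted from
 * lazy_vacuum_heap_page() once a page's LP_DEAD items have been set
 * LP_UNUSED; 'unused' is the array of now-unused offset numbers, and the
 * enclosing critical section is omitted here.
 */
if (RelationNeedsWAL(vacrel->rel))
{
    xl_heap_vacuum xlrec;
    XLogRecPtr  recptr;

    xlrec.nunused = nunused;

    XLogBeginInsert();
    XLogRegisterData((char *) &xlrec, SizeOfHeapVacuum);

    /* The offset numbers ride along with the registered heap buffer */
    XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
    XLogRegisterBufData(0, (char *) unused, nunused * sizeof(OffsetNumber));

    recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VACUUM);

    PageSetLSN(page, recptr);
}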
*/ -typedef struct xl_pg_tde_vacuum +typedef struct xl_heap_vacuum { uint16 nunused; /* OFFSET NUMBERS are in the block reference 0 */ -} xl_pg_tde_vacuum; +} xl_heap_vacuum; -#define SizeOfHeapVacuum (offsetof(xl_pg_tde_vacuum, nunused) + sizeof(uint16)) +#define SizeOfHeapVacuum (offsetof(xl_heap_vacuum, nunused) + sizeof(uint16)) /* flags for infobits_set */ #define XLHL_XMAX_IS_MULTI 0x01 @@ -273,57 +273,57 @@ typedef struct xl_pg_tde_vacuum #define XLHL_XMAX_KEYSHR_LOCK 0x08 #define XLHL_KEYS_UPDATED 0x10 -/* flag bits for xl_pg_tde_lock / xl_pg_tde_lock_updated's flag field */ +/* flag bits for xl_heap_lock / xl_heap_lock_updated's flag field */ #define XLH_LOCK_ALL_FROZEN_CLEARED 0x01 /* This is what we need to know about lock */ -typedef struct xl_pg_tde_lock +typedef struct xl_heap_lock { TransactionId xmax; /* might be a MultiXactId */ OffsetNumber offnum; /* locked tuple's offset on page */ uint8 infobits_set; /* infomask and infomask2 bits to set */ uint8 flags; /* XLH_LOCK_* flag bits */ -} xl_pg_tde_lock; +} xl_heap_lock; -#define SizeOfHeapLock (offsetof(xl_pg_tde_lock, flags) + sizeof(uint8)) +#define SizeOfHeapLock (offsetof(xl_heap_lock, flags) + sizeof(uint8)) /* This is what we need to know about locking an updated version of a row */ -typedef struct xl_pg_tde_lock_updated +typedef struct xl_heap_lock_updated { TransactionId xmax; OffsetNumber offnum; uint8 infobits_set; uint8 flags; -} xl_pg_tde_lock_updated; +} xl_heap_lock_updated; -#define SizeOfHeapLockUpdated (offsetof(xl_pg_tde_lock_updated, flags) + sizeof(uint8)) +#define SizeOfHeapLockUpdated (offsetof(xl_heap_lock_updated, flags) + sizeof(uint8)) /* This is what we need to know about confirmation of speculative insertion */ -typedef struct xl_pg_tde_confirm +typedef struct xl_heap_confirm { OffsetNumber offnum; /* confirmed tuple's offset on page */ -} xl_pg_tde_confirm; +} xl_heap_confirm; -#define SizeOfHeapConfirm (offsetof(xl_pg_tde_confirm, offnum) + sizeof(OffsetNumber)) +#define SizeOfHeapConfirm (offsetof(xl_heap_confirm, offnum) + sizeof(OffsetNumber)) /* This is what we need to know about in-place update */ -typedef struct xl_pg_tde_inplace +typedef struct xl_heap_inplace { OffsetNumber offnum; /* updated tuple's offset on page */ /* TUPLE DATA FOLLOWS AT END OF STRUCT */ -} xl_pg_tde_inplace; +} xl_heap_inplace; -#define SizeOfHeapInplace (offsetof(xl_pg_tde_inplace, offnum) + sizeof(OffsetNumber)) +#define SizeOfHeapInplace (offsetof(xl_heap_inplace, offnum) + sizeof(OffsetNumber)) /* * This struct represents a 'freeze plan', which describes how to freeze a - * group of one or more pg_tde tuples (appears in xl_pg_tde_freeze_page record) + * group of one or more heap tuples (appears in xl_heap_freeze_page record) */ /* 0x01 was XLH_FREEZE_XMIN */ #define XLH_FREEZE_XVAC 0x02 #define XLH_INVALID_XVAC 0x04 -typedef struct xl_pg_tde_freeze_plan +typedef struct xl_heap_freeze_plan { TransactionId xmax; uint16 t_infomask2; @@ -332,17 +332,17 @@ typedef struct xl_pg_tde_freeze_plan /* Length of individual page offset numbers array for this plan */ uint16 ntuples; -} xl_pg_tde_freeze_plan; +} xl_heap_freeze_plan; /* * This is what we need to know about a block being frozen during vacuum * - * Backup block 0's data contains an array of xl_pg_tde_freeze_plan structs + * Backup block 0's data contains an array of xl_heap_freeze_plan structs * (with nplans elements), followed by one or more page offset number arrays. 
* Each such page offset number array corresponds to a single freeze plan - * (REDO routine freezes corresponding pg_tde tuples using freeze plan). + * (REDO routine freezes corresponding heap tuples using freeze plan). */ -typedef struct xl_pg_tde_freeze_page +typedef struct xl_heap_freeze_page { TransactionId snapshotConflictHorizon; uint16 nplans; @@ -352,25 +352,25 @@ typedef struct xl_pg_tde_freeze_page /* * In payload of blk 0 : FREEZE PLANS and OFFSET NUMBER ARRAY */ -} xl_pg_tde_freeze_page; +} xl_heap_freeze_page; -#define SizeOfHeapFreezePage (offsetof(xl_pg_tde_freeze_page, isCatalogRel) + sizeof(bool)) +#define SizeOfHeapFreezePage (offsetof(xl_heap_freeze_page, isCatalogRel) + sizeof(bool)) /* * This is what we need to know about setting a visibility map bit * * Backup blk 0: visibility map buffer - * Backup blk 1: pg_tde buffer + * Backup blk 1: heap buffer */ -typedef struct xl_pg_tde_visible +typedef struct xl_heap_visible { TransactionId snapshotConflictHorizon; uint8 flags; -} xl_pg_tde_visible; +} xl_heap_visible; -#define SizeOfHeapVisible (offsetof(xl_pg_tde_visible, flags) + sizeof(uint8)) +#define SizeOfHeapVisible (offsetof(xl_heap_visible, flags) + sizeof(uint8)) -typedef struct xl_pg_tde_new_cid +typedef struct xl_heap_new_cid { /* * store toplevel xid so we don't have to merge cids from different @@ -386,12 +386,12 @@ typedef struct xl_pg_tde_new_cid */ RelFileLocator target_locator; ItemPointerData target_tid; -} xl_pg_tde_new_cid; +} xl_heap_new_cid; -#define SizeOfHeapNewCid (offsetof(xl_pg_tde_new_cid, target_tid) + sizeof(ItemPointerData)) +#define SizeOfHeapNewCid (offsetof(xl_heap_new_cid, target_tid) + sizeof(ItemPointerData)) /* logical rewrite xlog record header */ -typedef struct xl_pg_tde_rewrite_mapping +typedef struct xl_heap_rewrite_mapping { TransactionId mapped_xid; /* xid that might need to see the row */ Oid mapped_db; /* DbOid or InvalidOid for shared rels */ @@ -399,23 +399,23 @@ typedef struct xl_pg_tde_rewrite_mapping off_t offset; /* How far have we written so far */ uint32 num_mappings; /* Number of in-memory mappings */ XLogRecPtr start_lsn; /* Insert LSN at begin of rewrite */ -} xl_pg_tde_rewrite_mapping; +} xl_heap_rewrite_mapping; extern void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, TransactionId *snapshotConflictHorizon); -extern void pg_tde_redo(XLogReaderState *record); -extern void pg_tde_desc(StringInfo buf, XLogReaderState *record); +extern void heap_redo(XLogReaderState *record); +extern void heap_desc(StringInfo buf, XLogReaderState *record); extern const char *heap_identify(uint8 info); -extern void pg_tde_mask(char *pagedata, BlockNumber blkno); -extern void pg_tde2_redo(XLogReaderState *record); -extern void pg_tde2_desc(StringInfo buf, XLogReaderState *record); +extern void heap_mask(char *pagedata, BlockNumber blkno); +extern void heap2_redo(XLogReaderState *record); +extern void heap2_desc(StringInfo buf, XLogReaderState *record); extern const char *heap2_identify(uint8 info); -extern void pg_tde_xlog_logical_rewrite(XLogReaderState *r); +extern void heap_xlog_logical_rewrite(XLogReaderState *r); -extern XLogRecPtr log_pg_tde_visible(Relation rel, Buffer heap_buffer, +extern XLogRecPtr log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer, TransactionId snapshotConflictHorizon, uint8 vmflags); -#endif /* PG_TDEAM_XLOG_H */ +#endif /* HEAPAM_XLOG_H */ diff --git a/src/include/access/pg_tdetoast.h b/src/include/access/heaptoast.h similarity index 90% rename from 
src/include/access/pg_tdetoast.h rename to src/include/access/heaptoast.h index 0fca5e0f..5c0a796f 100644 --- a/src/include/access/pg_tdetoast.h +++ b/src/include/access/heaptoast.h @@ -10,9 +10,8 @@ * *------------------------------------------------------------------------- */ -#ifndef PG_TDE_TOAST_H -#define PG_TDE_TOAST_H - +#ifndef HEAPTOAST_H +#define HEAPTOAST_H #include "access/htup_details.h" #include "storage/lockdefs.h" @@ -90,21 +89,21 @@ VARHDRSZ) /* ---------- - * pg_tde_toast_insert_or_update - + * heap_toast_insert_or_update - * - * Called by pg_tde_insert() and pg_tde_update(). + * Called by heap_insert() and heap_update(). * ---------- */ -extern HeapTuple pg_tde_toast_insert_or_update(Relation rel, HeapTuple newtup, +extern HeapTuple heap_toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, int options); /* ---------- - * pg_tde_toast_delete - + * heap_toast_delete - * - * Called by pg_tde_delete(). + * Called by heap_delete(). * ---------- */ -extern void pg_tde_toast_delete(Relation rel, HeapTuple oldtup, +extern void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative); /* ---------- @@ -138,13 +137,13 @@ extern HeapTuple toast_build_flattened_tuple(TupleDesc tupleDesc, bool *isnull); /* ---------- - * pg_tde_fetch_toast_slice + * heap_fetch_toast_slice * * Fetch a slice from a toast value stored in a heap table. * ---------- */ -extern void pg_tde_fetch_toast_slice(Relation toastrel, Oid valueid, +extern void heap_fetch_toast_slice(Relation toastrel, Oid valueid, int32 attrsize, int32 sliceoffset, int32 slicelength, struct varlena *result); -#endif /* PG_TDE_TOAST_H */ +#endif /* HEAPTOAST_H */ diff --git a/src/include/access/pg_tde_io.h b/src/include/access/hio.h similarity index 66% rename from src/include/access/pg_tde_io.h rename to src/include/access/hio.h index 607c870f..9bc563b7 100644 --- a/src/include/access/pg_tde_io.h +++ b/src/include/access/hio.h @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * pg_tde_io.h + * hio.h * POSTGRES heap access method input/output definitions. * * @@ -11,8 +11,8 @@ * *------------------------------------------------------------------------- */ -#ifndef PG_TDE_IO_H -#define PG_TDE_IO_H +#ifndef HIO_H +#define HIO_H #include "access/htup.h" #include "storage/buf.h" @@ -32,24 +32,31 @@ typedef struct BulkInsertStateData Buffer current_buf; /* current insertion target page */ /* - * State for bulk extensions. Further pages that were unused at the time - * of the extension. They might be in use by the time we use them though, - * so rechecks are needed. + * State for bulk extensions. + * + * last_free..next_free are further pages that were unused at the time of + * the last extension. They might be in use by the time we use them + * though, so rechecks are needed. * * XXX: Eventually these should probably live in RelationData instead, * alongside targetblock. + * + * already_extended_by is the number of pages that this bulk inserted + * extended by. If we already extended by a significant number of pages, + * we can be more aggressive about extending going forward. 
*/ BlockNumber next_free; BlockNumber last_free; + uint32 already_extended_by; } BulkInsertStateData; -extern void pg_tde_RelationPutHeapTuple(Relation relation, Buffer buffer, +extern void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token); -extern Buffer pg_tde_RelationGetBufferForTuple(Relation relation, Size len, +extern Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertStateData *bistate, Buffer *vmbuffer, Buffer *vmbuffer_other, int num_pages); -#endif /* PG_TDE_IO_H */ +#endif /* HIO_H */ diff --git a/src/include/access/pg_tde_visibilitymap.h b/src/include/access/pg_tde_visibilitymap.h deleted file mode 100644 index 6234347b..00000000 --- a/src/include/access/pg_tde_visibilitymap.h +++ /dev/null @@ -1,42 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_tde_visibilitymap.h - * visibility map interface - * - * - * Portions Copyright (c) 2007-2023, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/access/pg_tde_visibilitymap.h - * - *------------------------------------------------------------------------- - */ -#ifndef PG_TDE_VISIBILITYMAP_H -#define PG_TDE_VISIBILITYMAP_H - -#include "access/visibilitymapdefs.h" -#include "access/xlogdefs.h" -#include "storage/block.h" -#include "storage/buf.h" -#include "utils/relcache.h" - -/* Macros for pg_tde_visibilitymap test */ -#define VM_ALL_VISIBLE(r, b, v) \ - ((pg_tde_visibilitymap_get_status((r), (b), (v)) & VISIBILITYMAP_ALL_VISIBLE) != 0) -#define VM_ALL_FROZEN(r, b, v) \ - ((pg_tde_visibilitymap_get_status((r), (b), (v)) & VISIBILITYMAP_ALL_FROZEN) != 0) - -extern bool pg_tde_visibilitymap_clear(Relation rel, BlockNumber heapBlk, - Buffer vmbuf, uint8 flags); -extern void pg_tde_visibilitymap_pin(Relation rel, BlockNumber heapBlk, - Buffer *vmbuf); -extern bool pg_tde_visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf); -extern void pg_tde_visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, - XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, - uint8 flags); -extern uint8 pg_tde_visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf); -extern void pg_tde_visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen); -extern BlockNumber pg_tde_visibilitymap_prepare_truncate(Relation rel, - BlockNumber nheapblocks); - -#endif /* PG_TDE_VISIBILITYMAP_H */ diff --git a/src/include/access/pg_tde_rewrite.h b/src/include/access/rewriteheap.h similarity index 79% rename from src/include/access/pg_tde_rewrite.h rename to src/include/access/rewriteheap.h index 55c12ea8..11254570 100644 --- a/src/include/access/pg_tde_rewrite.h +++ b/src/include/access/rewriteheap.h @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * pg_tde_rewrite.h + * rewriteheap.h * Declarations for heap rewrite support functions * * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group @@ -10,8 +10,8 @@ * *------------------------------------------------------------------------- */ -#ifndef PG_TDE_REWRITE_H -#define PG_TDE_REWRITE_H +#ifndef REWRITE_HEAP_H +#define REWRITE_HEAP_H #include "access/htup.h" #include "storage/itemptr.h" @@ -21,13 +21,13 @@ /* struct definition is private to rewriteheap.c */ typedef struct RewriteStateData *RewriteState; -extern RewriteState begin_pg_tde_rewrite(Relation old_heap, Relation new_heap, 
+extern RewriteState begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin, TransactionId freeze_xid, MultiXactId cutoff_multi); -extern void end_pg_tde_rewrite(RewriteState state); -extern void rewrite_pg_tde_tuple(RewriteState state, HeapTuple old_tuple, +extern void end_heap_rewrite(RewriteState state); +extern void rewrite_heap_tuple(RewriteState state, HeapTuple old_tuple, HeapTuple new_tuple); -extern bool rewrite_pg_tde_dead_tuple(RewriteState state, HeapTuple old_tuple); +extern bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple); /* * On-Disk data format for an individual logical rewrite mapping. @@ -54,4 +54,4 @@ typedef struct LogicalRewriteMappingData #define LOGICAL_REWRITE_FORMAT "map-%x-%x-%X_%X-%x-%x" extern void CheckPointLogicalRewriteHeap(void); -#endif /* PG_TDE_REWRITE_H */ +#endif /* REWRITE_HEAP_H */ diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h new file mode 100644 index 00000000..daaa01a2 --- /dev/null +++ b/src/include/access/visibilitymap.h @@ -0,0 +1,42 @@ +/*------------------------------------------------------------------------- + * + * visibilitymap.h + * visibility map interface + * + * + * Portions Copyright (c) 2007-2023, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * src/include/access/visibilitymap.h + * + *------------------------------------------------------------------------- + */ +#ifndef VISIBILITYMAP_H +#define VISIBILITYMAP_H + +#include "access/visibilitymapdefs.h" +#include "access/xlogdefs.h" +#include "storage/block.h" +#include "storage/buf.h" +#include "utils/relcache.h" + +/* Macros for visibilitymap test */ +#define VM_ALL_VISIBLE(r, b, v) \ + ((visibilitymap_get_status((r), (b), (v)) & VISIBILITYMAP_ALL_VISIBLE) != 0) +#define VM_ALL_FROZEN(r, b, v) \ + ((visibilitymap_get_status((r), (b), (v)) & VISIBILITYMAP_ALL_FROZEN) != 0) + +extern bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, + Buffer vmbuf, uint8 flags); +extern void visibilitymap_pin(Relation rel, BlockNumber heapBlk, + Buffer *vmbuf); +extern bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf); +extern void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, + XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, + uint8 flags); +extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf); +extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen); +extern BlockNumber visibilitymap_prepare_truncate(Relation rel, + BlockNumber nheapblocks); + +#endif /* VISIBILITYMAP_H */
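/*
 * A minimal sketch of read-side use of this interface: counting the
 * all-visible and all-frozen blocks of a relation with the VM_* macros while
 * reusing a single VM buffer pin across calls, as the
 * visibilitymap_get_status() comment asks. The function name is illustrative
 * only; visibilitymap_count() does the same job more efficiently by reading
 * the map pages directly.
 */
static void
count_vm_bits_sketch(Relation rel, BlockNumber nblocks,
                     BlockNumber *all_visible, BlockNumber *all_frozen)
{
    Buffer      vmbuffer = InvalidBuffer;
    BlockNumber blkno;

    *all_visible = 0;
    *all_frozen = 0;

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        /* Both macros expand to visibilitymap_get_status() bit tests */
        if (VM_ALL_VISIBLE(rel, blkno, &vmbuffer))
            (*all_visible)++;
        if (VM_ALL_FROZEN(rel, blkno, &vmbuffer))
            (*all_frozen)++;
    }

    if (BufferIsValid(vmbuffer))
        ReleaseBuffer(vmbuffer);
}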