PostgreSQL Source Code git master
heapam_xlog.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "storage/freespace.h"
#include "storage/standby.h"
Include dependency graph for heapam_xlog.c:

Go to the source code of this file.

Functions

static void heap_xlog_prune_freeze (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Function Documentation

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 infomask,
uint16 infomask2 
)
static

Definition at line 389 of file heapam_xlog.c.

390{
 /*
  * fix_infomask_from_infobits: translate the compact XLHL_* flag byte
  * carried in heap WAL records (delete/lock/update) back into the tuple
  * header's infomask / infomask2 bits, after clearing the bits this
  * routine is allowed to set.
  *
  * NOTE(review): this Doxygen extract dropped original line 392, the
  * continuation of the mask-clear expression below -- confirm the full
  * cleared bit set against the actual source file.
  */
391 *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
393 *infomask2 &= ~HEAP_KEYS_UPDATED;
394
 /* Re-set exactly the bits the WAL record says were set on the primary. */
395 if (infobits & XLHL_XMAX_IS_MULTI)
396 *infomask |= HEAP_XMAX_IS_MULTI;
397 if (infobits & XLHL_XMAX_LOCK_ONLY)
398 *infomask |= HEAP_XMAX_LOCK_ONLY;
399 if (infobits & XLHL_XMAX_EXCL_LOCK)
400 *infomask |= HEAP_XMAX_EXCL_LOCK;
401 /* note HEAP_XMAX_SHR_LOCK isn't considered here */
402 if (infobits & XLHL_XMAX_KEYSHR_LOCK)
403 *infomask |= HEAP_XMAX_KEYSHR_LOCK;
404
405 if (infobits & XLHL_KEYS_UPDATED)
406 *infomask2 |= HEAP_KEYS_UPDATED;
407}
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:397
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:394
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:395
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:396
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:398
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:289
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:197
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:209
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:196
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:194

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

◆ heap2_redo()

void heap2_redo ( XLogReaderState record)

Definition at line 1346 of file heapam_xlog.c.

1347{
 /*
  * heap2_redo: redo dispatcher for RM_HEAP2_ID WAL records; routes each
  * opcode to its replay routine.
  *
  * NOTE(review): Doxygen extraction dropped several case labels from this
  * extract (original lines 1352-1354: the three XLOG_HEAP2_PRUNE_* cases;
  * 1360: XLOG_HEAP2_MULTI_INSERT; 1363: XLOG_HEAP2_LOCK_UPDATED; and line
  * 1374: the heap_xlog_logical_rewrite(record) call, per the References
  * list) -- confirm against the actual source file.
  */
1348 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
1349
1350 switch (info & XLOG_HEAP_OPMASK)
1351 {
 /* (dropped case labels: the XLOG_HEAP2_PRUNE_* opcodes) */
1355 heap_xlog_prune_freeze(record);
1356 break;
1357 case XLOG_HEAP2_VISIBLE:
1358 heap_xlog_visible(record);
1359 break;
 /* (dropped case label: XLOG_HEAP2_MULTI_INSERT) */
1361 heap_xlog_multi_insert(record);
1362 break;
 /* (dropped case label: XLOG_HEAP2_LOCK_UPDATED) */
1364 heap_xlog_lock_updated(record);
1365 break;
1366 case XLOG_HEAP2_NEW_CID:
1367
1368 /*
1369 * Nothing to do on a real replay, only used during logical
1370 * decoding.
1371 */
1372 break;
1373 case XLOG_HEAP2_REWRITE:
 /* (dropped line 1374: heap_xlog_logical_rewrite(record); per References) */
1375 break;
1376 default:
1377 elog(PANIC, "heap2_redo: unknown op code %u", info);
1378 }
1379}
uint8_t uint8
Definition: c.h:540
#define PANIC
Definition: elog.h:42
#define elog(elevel,...)
Definition: elog.h:226
static void heap_xlog_prune_freeze(XLogReaderState *record)
Definition: heapam_xlog.c:30
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam_xlog.c:1190
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam_xlog.c:607
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam_xlog.c:255
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:64
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:59
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:42
#define XLOG_HEAP2_PRUNE_VACUUM_SCAN
Definition: heapam_xlog.h:61
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:65
#define XLOG_HEAP2_PRUNE_ON_ACCESS
Definition: heapam_xlog.h:60
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:66
#define XLOG_HEAP2_PRUNE_VACUUM_CLEANUP
Definition: heapam_xlog.h:62
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:63
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1072
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:409

References elog, heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_prune_freeze(), heap_xlog_visible(), PANIC, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_PRUNE_ON_ACCESS, XLOG_HEAP2_PRUNE_VACUUM_CLEANUP, XLOG_HEAP2_PRUNE_VACUUM_SCAN, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, and XLogRecGetInfo.

◆ heap_mask()

void heap_mask ( char *  pagedata,
BlockNumber  blkno 
)

Definition at line 1385 of file heapam_xlog.c.

1386{
 /*
  * heap_mask: mask out fields of a heap page that may legitimately differ
  * between primary and standby, so wal_consistency_checking page
  * comparisons don't raise false alarms (hint bits, unused space, padding,
  * speculative-insertion ctids).
  *
  * NOTE(review): this Doxygen extract drops the hyperlinked statement at
  * original line 1391 (mask_page_lsn_and_checksum(page), per the
  * References list) and line 1424 (the command-id masking statement that
  * the comment above it describes) -- confirm against the actual source.
  */
1387 Page page = (Page) pagedata;
1388 OffsetNumber off;
1389
1391
1392 mask_page_hint_bits(page);
1393 mask_unused_space(page);
1394
 /* Walk every line pointer on the page. */
1395 for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
1396 {
1397 ItemId iid = PageGetItemId(page, off);
1398 char *page_item;
1399
1400 page_item = (char *) (page + ItemIdGetOffset(iid));
1401
1402 if (ItemIdIsNormal(iid))
1403 {
1404 HeapTupleHeader page_htup = (HeapTupleHeader) page_item;
1405
1406 /*
1407 * If xmin of a tuple is not yet frozen, we should ignore
1408 * differences in hint bits, since they can be set without
1409 * emitting WAL.
1410 */
1411 if (!HeapTupleHeaderXminFrozen(page_htup))
1412 page_htup->t_infomask &= ~HEAP_XACT_MASK;
1413 else
1414 {
1415 /* Still we need to mask xmax hint bits. */
1416 page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
1417 page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
1418 }
1419
1420 /*
1421 * During replay, we set Command Id to FirstCommandId. Hence, mask
1422 * it. See heap_xlog_insert() for details.
1423 */
 /* (dropped line 1424: the t_cid masking assignment) */
1425
1426 /*
1427 * For a speculative tuple, heap_insert() does not set ctid in the
1428 * caller-passed heap tuple itself, leaving the ctid field to
1429 * contain a speculative token value - a per-backend monotonically
1430 * increasing identifier. Besides, it does not WAL-log ctid under
1431 * any circumstances.
1432 *
1433 * During redo, heap_xlog_insert() sets t_ctid to current block
1434 * number and self offset number. It doesn't care about any
1435 * speculative insertions on the primary. Hence, we set t_ctid to
1436 * current block number and self offset number to ignore any
1437 * inconsistency.
1438 */
1439 if (HeapTupleHeaderIsSpeculative(page_htup))
1440 ItemPointerSet(&page_htup->t_ctid, blkno, off);
1441
1442 /*
1443 * NB: Not ignoring ctid changes due to the tuple having moved
1444 * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
1445 * important information that needs to be in-sync between primary
1446 * and standby, and thus is WAL logged.
1447 */
1448 }
1449
1450 /*
1451 * Ignore any padding bytes after the tuple, when the length of the
1452 * item is not MAXALIGNed.
1453 */
1454 if (ItemIdHasStorage(iid))
1455 {
1456 int len = ItemIdGetLength(iid);
1457 int padlen = MAXALIGN(len) - len;
1458
1459 if (padlen > 0)
1460 memset(page_item + len, MASK_MARKER, padlen);
1461 }
1462 }
1463}
void mask_page_lsn_and_checksum(Page page)
Definition: bufmask.c:31
void mask_unused_space(Page page)
Definition: bufmask.c:71
void mask_page_hint_bits(Page page)
Definition: bufmask.c:46
#define MASK_MARKER
Definition: bufmask.h:24
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
PageData * Page
Definition: bufpage.h:81
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:371
#define MAXALIGN(LEN)
Definition: c.h:814
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool HeapTupleHeaderXminFrozen(const HeapTupleHeaderData *tup)
Definition: htup_details.h:350
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:207
#define HEAP_XACT_MASK
Definition: htup_details.h:215
#define HEAP_XMAX_INVALID
Definition: htup_details.h:208
static bool HeapTupleHeaderIsSpeculative(const HeapTupleHeaderData *tup)
Definition: htup_details.h:461
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdGetOffset(itemId)
Definition: itemid.h:65
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
uint16 OffsetNumber
Definition: off.h:24
const void size_t len
union HeapTupleFields::@48 t_field3
CommandId t_cid
Definition: htup_details.h:129
union HeapTupleHeaderData::@49 t_choice
ItemPointerData t_ctid
Definition: htup_details.h:161
HeapTupleFields t_heap
Definition: htup_details.h:157

References HEAP_XACT_MASK, HEAP_XMAX_COMMITTED, HEAP_XMAX_INVALID, HeapTupleHeaderIsSpeculative(), HeapTupleHeaderXminFrozen(), ItemIdGetLength, ItemIdGetOffset, ItemIdHasStorage, ItemIdIsNormal, ItemPointerSet(), len, MASK_MARKER, mask_page_hint_bits(), mask_page_lsn_and_checksum(), mask_unused_space(), MAXALIGN, PageGetItemId(), PageGetMaxOffsetNumber(), and HeapTupleHeaderData::t_infomask.

◆ heap_redo()

void heap_redo ( XLogReaderState record)

Definition at line 1300 of file heapam_xlog.c.

1301{
 /*
  * heap_redo: redo dispatcher for RM_HEAP_ID WAL records.
  *
  * NOTE(review): Doxygen extraction dropped original line 1328 (the
  * XLOG_HEAP_HOT_UPDATE case label that precedes the hot_update=true
  * call below, per the References list) -- confirm against the source.
  */
1302 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
1303
1304 /*
1305 * These operations don't overwrite MVCC data so no conflict processing is
1306 * required. The ones in heap2 rmgr do.
1307 */
1308
1309 switch (info & XLOG_HEAP_OPMASK)
1310 {
1311 case XLOG_HEAP_INSERT:
1312 heap_xlog_insert(record);
1313 break;
1314 case XLOG_HEAP_DELETE:
1315 heap_xlog_delete(record);
1316 break;
1317 case XLOG_HEAP_UPDATE:
1318 heap_xlog_update(record, false);
1319 break;
1320 case XLOG_HEAP_TRUNCATE:
1321
1322 /*
1323 * TRUNCATE is a no-op because the actions are already logged as
1324 * SMGR WAL records. TRUNCATE WAL record only exists for logical
1325 * decoding.
1326 */
1327 break;
 /* (dropped case label: XLOG_HEAP_HOT_UPDATE) */
1329 heap_xlog_update(record, true);
1330 break;
1331 case XLOG_HEAP_CONFIRM:
1332 heap_xlog_confirm(record);
1333 break;
1334 case XLOG_HEAP_LOCK:
1335 heap_xlog_lock(record);
1336 break;
1337 case XLOG_HEAP_INPLACE:
1338 heap_xlog_inplace(record);
1339 break;
1340 default:
1341 elog(PANIC, "heap_redo: unknown op code %u", info);
1342 }
1343}
static void heap_xlog_insert(XLogReaderState *record)
Definition: heapam_xlog.c:489
static void heap_xlog_update(XLogReaderState *record, bool hot_update)
Definition: heapam_xlog.c:802
static void heap_xlog_delete(XLogReaderState *record)
Definition: heapam_xlog.c:413
static void heap_xlog_lock(XLogReaderState *record)
Definition: heapam_xlog.c:1116
static void heap_xlog_inplace(XLogReaderState *record)
Definition: heapam_xlog.c:1253
static void heap_xlog_confirm(XLogReaderState *record)
Definition: heapam_xlog.c:1077
#define XLOG_HEAP_HOT_UPDATE
Definition: heapam_xlog.h:37
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:34
#define XLOG_HEAP_TRUNCATE
Definition: heapam_xlog.h:36
#define XLOG_HEAP_UPDATE
Definition: heapam_xlog.h:35
#define XLOG_HEAP_INPLACE
Definition: heapam_xlog.h:40
#define XLOG_HEAP_LOCK
Definition: heapam_xlog.h:39
#define XLOG_HEAP_INSERT
Definition: heapam_xlog.h:33
#define XLOG_HEAP_CONFIRM
Definition: heapam_xlog.h:38

References elog, heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_update(), PANIC, XLOG_HEAP_CONFIRM, XLOG_HEAP_DELETE, XLOG_HEAP_HOT_UPDATE, XLOG_HEAP_INPLACE, XLOG_HEAP_INSERT, XLOG_HEAP_LOCK, XLOG_HEAP_OPMASK, XLOG_HEAP_TRUNCATE, XLOG_HEAP_UPDATE, and XLogRecGetInfo.

◆ heap_xlog_confirm()

static void heap_xlog_confirm ( XLogReaderState record)
static

Definition at line 1077 of file heapam_xlog.c.

1078{
 /*
  * heap_xlog_confirm: replay an XLOG_HEAP_CONFIRM record, which finalizes
  * a speculative insertion by pointing the tuple's t_ctid back at the
  * tuple itself (block number + own offset).
  */
1079 XLogRecPtr lsn = record->EndRecPtr;
1080 xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record);
1081 Buffer buffer;
1082 Page page;
1083 OffsetNumber offnum;
1084 ItemId lp = NULL;
1085 HeapTupleHeader htup;
1086
1087 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1088 {
1089 page = BufferGetPage(buffer);
1090
 /* The target line pointer must exist and refer to a normal tuple. */
1091 offnum = xlrec->offnum;
1092 if (PageGetMaxOffsetNumber(page) >= offnum)
1093 lp = PageGetItemId(page, offnum);
1094
1095 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
1096 elog(PANIC, "invalid lp")
1097
1098 htup = (HeapTupleHeader) PageGetItem(page, lp);
1099
1100 /*
1101 * Confirm tuple as actually inserted
1102 */
1103 ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
1104
1105 PageSetLSN(page, lsn);
1106 MarkBufferDirty(buffer);
1107 }
1108 if (BufferIsValid(buffer))
1109 UnlockReleaseBuffer(buffer);
1110}
int Buffer
Definition: buf.h:23
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:4224
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5388
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2937
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:425
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:376
static void * PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:353
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:390
XLogRecPtr EndRecPtr
Definition: xlogreader.h:206
OffsetNumber offnum
Definition: heapam_xlog.h:428
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define XLogRecGetData(decoder)
Definition: xlogreader.h:414
XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id, Buffer *buf)
Definition: xlogutils.c:303
@ BLK_NEEDS_REDO
Definition: xlogutils.h:74

References BLK_NEEDS_REDO, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), elog, XLogReaderState::EndRecPtr, ItemIdIsNormal, ItemPointerSet(), MarkBufferDirty(), xl_heap_confirm::offnum, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, HeapTupleHeaderData::t_ctid, UnlockReleaseBuffer(), XLogReadBufferForRedo(), and XLogRecGetData.

Referenced by heap_redo().

◆ heap_xlog_delete()

static void heap_xlog_delete ( XLogReaderState record)
static

Definition at line 413 of file heapam_xlog.c.

414{
 /*
  * heap_xlog_delete: replay an XLOG_HEAP_DELETE record -- clear the
  * visibility-map bits if needed, then set xmax (or super-delete) on the
  * target tuple and mark the page prunable.
  *
  * NOTE(review): this Doxygen extract dropped several hyperlinked lines
  * (originals 433, 441, 456, 458-459, 464-465, 470-471, 474-475),
  * including the flag tests and the fix_infomask_from_infobits /
  * FreeFakeRelcacheEntry calls named in the References list -- confirm
  * the exact statements against the actual source file.
  */
415 XLogRecPtr lsn = record->EndRecPtr;
416 xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
417 Buffer buffer;
418 Page page;
419 ItemId lp = NULL;
420 HeapTupleHeader htup;
421 BlockNumber blkno;
422 RelFileLocator target_locator;
423 ItemPointerData target_tid;
424
425 XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
426 ItemPointerSetBlockNumber(&target_tid, blkno);
427 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
428
429 /*
430 * The visibility map may need to be fixed even if the heap page is
431 * already up-to-date.
432 */
 /* (dropped line 433: the flags condition guarding this block) */
434 {
435 Relation reln = CreateFakeRelcacheEntry(target_locator);
436 Buffer vmbuffer = InvalidBuffer;
437
438 visibilitymap_pin(reln, blkno, &vmbuffer);
439 visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
440 ReleaseBuffer(vmbuffer);
 /* (dropped line 441: FreeFakeRelcacheEntry(reln); per References) */
442 }
443
444 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
445 {
446 page = BufferGetPage(buffer);
447
448 if (PageGetMaxOffsetNumber(page) >= xlrec->offnum)
449 lp = PageGetItemId(page, xlrec->offnum);
450
451 if (PageGetMaxOffsetNumber(page) < xlrec->offnum || !ItemIdIsNormal(lp))
452 elog(PANIC, "invalid lp");
453
454 htup = (HeapTupleHeader) PageGetItem(page, lp);
455
 /* (dropped line 456: the t_infomask mask-clear statement) */
457 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
 /* (dropped lines 458-459: start of the fix_infomask_from_infobits call) */
460 &htup->t_infomask, &htup->t_infomask2);
461 if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
462 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
463 else
 /* (dropped lines 464-465: the super-delete branch body) */
466
467 /* Mark the page as a candidate for pruning */
468 PageSetPrunable(page, XLogRecGetXid(record));
469
 /* (dropped lines 470-471) */
472
473 /* Make sure t_ctid is set correctly */
 /* (dropped lines 474-475: the partition-move condition and branch) */
476 else
477 htup->t_ctid = target_tid;
478 PageSetLSN(page, lsn);
479 MarkBufferDirty(buffer);
480 }
481 if (BufferIsValid(buffer))
482 UnlockReleaseBuffer(buffer);
483}
uint32 BlockNumber
Definition: block.h:31
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:5371
static void PageClearAllVisible(Page page)
Definition: bufpage.h:438
#define PageSetPrunable(page, xid)
Definition: bufpage.h:446
#define FirstCommandId
Definition: c.h:677
static void fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
Definition: heapam_xlog.c:389
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:102
#define XLH_DELETE_IS_PARTITION_MOVE
Definition: heapam_xlog.h:106
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:105
static void HeapTupleHeaderSetCmax(HeapTupleHeaderData *tup, CommandId cid, bool iscombo)
Definition: htup_details.h:431
static void HeapTupleHeaderClearHotUpdated(HeapTupleHeaderData *tup)
Definition: htup_details.h:549
#define HEAP_XMAX_BITS
Definition: htup_details.h:281
#define HEAP_MOVED
Definition: htup_details.h:213
static void HeapTupleHeaderSetXmin(HeapTupleHeaderData *tup, TransactionId xid)
Definition: htup_details.h:331
static void HeapTupleHeaderSetMovedPartitions(HeapTupleHeaderData *tup)
Definition: htup_details.h:486
static void HeapTupleHeaderSetXmax(HeapTupleHeaderData *tup, TransactionId xid)
Definition: htup_details.h:383
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
Definition: itemptr.h:158
static void ItemPointerSetBlockNumber(ItemPointerData *pointer, BlockNumber blockNumber)
Definition: itemptr.h:147
TransactionId xmax
Definition: heapam_xlog.h:115
OffsetNumber offnum
Definition: heapam_xlog.h:116
uint8 infobits_set
Definition: heapam_xlog.h:117
#define InvalidTransactionId
Definition: transam.h:31
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
#define VISIBILITYMAP_VALID_BITS
void XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition: xlogreader.c:1991
#define XLogRecGetXid(decoder)
Definition: xlogreader.h:411
void FreeFakeRelcacheEntry(Relation fakerel)
Definition: xlogutils.c:618
Relation CreateFakeRelcacheEntry(RelFileLocator rlocator)
Definition: xlogutils.c:571

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), elog, XLogReaderState::EndRecPtr, FirstCommandId, fix_infomask_from_infobits(), xl_heap_delete::flags, FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHeaderClearHotUpdated(), HeapTupleHeaderSetCmax(), HeapTupleHeaderSetMovedPartitions(), HeapTupleHeaderSetXmax(), HeapTupleHeaderSetXmin(), xl_heap_delete::infobits_set, InvalidBuffer, InvalidTransactionId, ItemIdIsNormal, ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), MarkBufferDirty(), xl_heap_delete::offnum, PageClearAllVisible(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PageSetPrunable, PANIC, ReleaseBuffer(), HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_IS_PARTITION_MOVE, XLH_DELETE_IS_SUPER, XLogReadBufferForRedo(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecGetXid, and xl_heap_delete::xmax.

Referenced by heap_redo().

◆ heap_xlog_inplace()

static void heap_xlog_inplace ( XLogReaderState record)
static

Definition at line 1253 of file heapam_xlog.c.

1254{
 /*
  * heap_xlog_inplace: replay an XLOG_HEAP_INPLACE record -- overwrite the
  * tuple data in place (length must match exactly), then process the
  * invalidation messages carried in the record.
  *
  * NOTE(review): this Doxygen extract dropped original line 1292, the
  * opening of the ProcessCommittedInvalidationMessages(...) call whose
  * remaining arguments follow below (per the References list) -- confirm
  * against the actual source file.
  */
1255 XLogRecPtr lsn = record->EndRecPtr;
1256 xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
1257 Buffer buffer;
1258 Page page;
1259 OffsetNumber offnum;
1260 ItemId lp = NULL;
1261 HeapTupleHeader htup;
1262 uint32 oldlen;
1263 Size newlen;
1264
1265 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1266 {
1267 char *newtup = XLogRecGetBlockData(record, 0, &newlen);
1268
1269 page = BufferGetPage(buffer);
1270
1271 offnum = xlrec->offnum;
1272 if (PageGetMaxOffsetNumber(page) >= offnum)
1273 lp = PageGetItemId(page, offnum);
1274
1275 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
1276 elog(PANIC, "invalid lp");
1277
1278 htup = (HeapTupleHeader) PageGetItem(page, lp);
1279
 /* In-place update must not change the tuple's data length. */
1280 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
1281 if (oldlen != newlen)
1282 elog(PANIC, "wrong tuple length");
1283
1284 memcpy((char *) htup + htup->t_hoff, newtup, newlen);
1285
1286 PageSetLSN(page, lsn);
1287 MarkBufferDirty(buffer);
1288 }
1289 if (BufferIsValid(buffer))
1290 UnlockReleaseBuffer(buffer);
1291
 /* (dropped line 1292: ProcessCommittedInvalidationMessages(xlrec->msgs,) */
1293 xlrec->nmsgs,
1294 xlrec->relcacheInitFileInval,
1295 xlrec->dbId,
1296 xlrec->tsId);
1297}
uint32_t uint32
Definition: c.h:542
size_t Size
Definition: c.h:614
void ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, int nmsgs, bool RelcacheInitFileInval, Oid dbid, Oid tsid)
Definition: inval.c:1135
OffsetNumber offnum
Definition: heapam_xlog.h:436
SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER]
Definition: heapam_xlog.h:441
bool relcacheInitFileInval
Definition: heapam_xlog.h:439
char * XLogRecGetBlockData(XLogReaderState *record, uint8 block_id, Size *len)
Definition: xlogreader.c:2045

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), xl_heap_inplace::dbId, elog, XLogReaderState::EndRecPtr, ItemIdGetLength, ItemIdIsNormal, MarkBufferDirty(), xl_heap_inplace::msgs, xl_heap_inplace::nmsgs, xl_heap_inplace::offnum, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, ProcessCommittedInvalidationMessages(), xl_heap_inplace::relcacheInitFileInval, HeapTupleHeaderData::t_hoff, xl_heap_inplace::tsId, UnlockReleaseBuffer(), XLogReadBufferForRedo(), XLogRecGetBlockData(), and XLogRecGetData.

Referenced by heap_redo().

◆ heap_xlog_insert()

static void heap_xlog_insert ( XLogReaderState record)
static

Definition at line 489 of file heapam_xlog.c.

490{
 /*
  * heap_xlog_insert: replay an XLOG_HEAP_INSERT record -- clear VM bits
  * if needed, (re)initialize the page for the INIT_PAGE variant,
  * reconstruct the tuple from the WAL block data, add it at the recorded
  * offset, and update the FSM when the page is running low on space.
  *
  * NOTE(review): this Doxygen extract dropped many hyperlinked lines
  * (originals 497-498, 507, 514-515, 520, 528, 535, 540, 559, 562,
  * 571-572, 582-583), including the union members, flag tests, the
  * XLogRedoAction declaration, and the xmin/cmin assignments named in the
  * References list -- confirm the exact statements against the source.
  */
491 XLogRecPtr lsn = record->EndRecPtr;
492 xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
493 Buffer buffer;
494 Page page;
495 union
496 {
 /* (dropped lines 497-498: the union's members, e.g. a header plus a
  * MaxHeapTupleSize-sized byte buffer -- TODO confirm) */
499 } tbuf;
500 HeapTupleHeader htup;
501 xl_heap_header xlhdr;
502 uint32 newlen;
503 Size freespace = 0;
504 RelFileLocator target_locator;
505 BlockNumber blkno;
506 ItemPointerData target_tid;
 /* (dropped line 507: the XLogRedoAction variable declaration) */
508
509 XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
510 ItemPointerSetBlockNumber(&target_tid, blkno);
511 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
512
513 /* No freezing in the heap_insert() code path */
 /* (dropped line 514: the assertion the comment above refers to) */
515
516 /*
517 * The visibility map may need to be fixed even if the heap page is
518 * already up-to-date.
519 */
 /* (dropped line 520: the flags condition guarding this block) */
521 {
522 Relation reln = CreateFakeRelcacheEntry(target_locator);
523 Buffer vmbuffer = InvalidBuffer;
524
525 visibilitymap_pin(reln, blkno, &vmbuffer);
526 visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
527 ReleaseBuffer(vmbuffer);
 /* (dropped line 528: FreeFakeRelcacheEntry(reln); per References) */
529 }
530
531 /*
532 * If we inserted the first and only tuple on the page, re-initialize the
533 * page from scratch.
534 */
 /* (dropped line 535: the XLOG_HEAP_INIT_PAGE condition) */
536 {
537 buffer = XLogInitBufferForRedo(record, 0);
538 page = BufferGetPage(buffer);
539 PageInit(page, BufferGetPageSize(buffer), 0);
 /* (dropped line 540: the action assignment for the init-page path) */
541 }
542 else
543 action = XLogReadBufferForRedo(record, 0, &buffer);
544 if (action == BLK_NEEDS_REDO)
545 {
546 Size datalen;
547 char *data;
548
549 page = BufferGetPage(buffer);
550
551 if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
552 elog(PANIC, "invalid max offset number");
553
554 data = XLogRecGetBlockData(record, 0, &datalen);
555
556 newlen = datalen - SizeOfHeapHeader;
557 Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize);
558 memcpy(&xlhdr, data, SizeOfHeapHeader);
 /* (dropped line 559: advancing data past the copied header) */
560
561 htup = &tbuf.hdr;
 /* (dropped line 562: zeroing the tuple header before filling it) */
563 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
564 memcpy((char *) htup + SizeofHeapTupleHeader,
565 data,
566 newlen);
567 newlen += SizeofHeapTupleHeader;
568 htup->t_infomask2 = xlhdr.t_infomask2;
569 htup->t_infomask = xlhdr.t_infomask;
570 htup->t_hoff = xlhdr.t_hoff;
 /* (dropped lines 571-572: the xmin/cmin assignments -- References list
  * names HeapTupleHeaderSetXmin and HeapTupleHeaderSetCmin) */
573 htup->t_ctid = target_tid;
574
575 if (PageAddItem(page, htup, newlen, xlrec->offnum, true, true) == InvalidOffsetNumber)
576 elog(PANIC, "failed to add tuple");
577
578 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
579
580 PageSetLSN(page, lsn);
581
 /* (dropped lines 582-583: the all-visible-cleared handling -- References
  * list names PageClearAllVisible) */
584
585 MarkBufferDirty(buffer);
586 }
587 if (BufferIsValid(buffer))
588 UnlockReleaseBuffer(buffer);
589
590 /*
591 * If the page is running low on free space, update the FSM as well.
592 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
593 * better than that without knowing the fill-factor for the table.
594 *
595 * XXX: Don't do this if the page was restored from full page image. We
596 * don't bother to update the FSM in that case, it doesn't need to be
597 * totally accurate anyway.
598 */
599 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
600 XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
601}
static Size BufferGetPageSize(Buffer buffer)
Definition: bufmgr.h:414
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:990
void PageInit(Page page, Size pageSize, Size specialSize)
Definition: bufpage.c:42
#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap)
Definition: bufpage.h:471
#define MemSet(start, val, len)
Definition: c.h:1023
void XLogRecordPageWithFreeSpace(RelFileLocator rlocator, BlockNumber heapBlk, Size spaceAvail)
Definition: freespace.c:211
Assert(PointerIsAligned(start, uint64))
#define XLH_INSERT_ALL_FROZEN_SET
Definition: heapam_xlog.h:79
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:72
#define SizeOfHeapHeader
Definition: heapam_xlog.h:157
#define XLOG_HEAP_INIT_PAGE
Definition: heapam_xlog.h:47
#define SizeofHeapTupleHeader
Definition: htup_details.h:185
static void HeapTupleHeaderSetCmin(HeapTupleHeaderData *tup, CommandId cid)
Definition: htup_details.h:422
#define MaxHeapTupleSize
Definition: htup_details.h:610
#define InvalidOffsetNumber
Definition: off.h:26
const void * data
uint16 t_infomask
Definition: heapam_xlog.h:153
uint16 t_infomask2
Definition: heapam_xlog.h:152
OffsetNumber offnum
Definition: heapam_xlog.h:162
Buffer XLogInitBufferForRedo(XLogReaderState *record, uint8 block_id)
Definition: xlogutils.c:315
XLogRedoAction
Definition: xlogutils.h:73

References generate_unaccent_rules::action, Assert(), BLK_NEEDS_REDO, BufferGetPage(), BufferGetPageSize(), BufferIsValid(), CreateFakeRelcacheEntry(), data, elog, XLogReaderState::EndRecPtr, FirstCommandId, xl_heap_insert::flags, FreeFakeRelcacheEntry(), HeapTupleHeaderSetCmin(), HeapTupleHeaderSetXmin(), InvalidBuffer, InvalidOffsetNumber, ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), MarkBufferDirty(), MaxHeapTupleSize, MemSet, xl_heap_insert::offnum, PageAddItem, PageClearAllVisible(), PageGetHeapFreeSpace(), PageGetMaxOffsetNumber(), PageInit(), PageSetLSN(), PANIC, ReleaseBuffer(), SizeOfHeapHeader, SizeofHeapTupleHeader, HeapTupleHeaderData::t_ctid, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_FROZEN_SET, XLH_INSERT_ALL_VISIBLE_CLEARED, XLOG_HEAP_INIT_PAGE, XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecGetInfo, XLogRecGetXid, and XLogRecordPageWithFreeSpace().

Referenced by heap_redo().

◆ heap_xlog_lock()

static void heap_xlog_lock ( XLogReaderState record)
static

Definition at line 1116 of file heapam_xlog.c.

1117{
1118 XLogRecPtr lsn = record->EndRecPtr;
1119 xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
1120 Buffer buffer;
1121 Page page;
1122 OffsetNumber offnum;
1123 ItemId lp = NULL;
1124 HeapTupleHeader htup;
1125
1126 /*
1127 * The visibility map may need to be fixed even if the heap page is
1128 * already up-to-date.
1129 */
1131 {
1132 RelFileLocator rlocator;
1133 Buffer vmbuffer = InvalidBuffer;
1134 BlockNumber block;
1135 Relation reln;
1136
1137 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
1138 reln = CreateFakeRelcacheEntry(rlocator);
1139
1140 visibilitymap_pin(reln, block, &vmbuffer);
1141 visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
1142
1143 ReleaseBuffer(vmbuffer);
1145 }
1146
1147 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1148 {
1149 page = BufferGetPage(buffer);
1150
1151 offnum = xlrec->offnum;
1152 if (PageGetMaxOffsetNumber(page) >= offnum)
1153 lp = PageGetItemId(page, offnum);
1154
1155 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
1156 elog(PANIC, "invalid lp");
1157
1158 htup = (HeapTupleHeader) PageGetItem(page, lp);
1159
1160 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
1161 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
1163 &htup->t_infomask2);
1164
1165 /*
1166 * Clear relevant update flags, but only if the modified infomask says
1167 * there's no update.
1168 */
1170 {
1172 /* Make sure there is no forward chain link in t_ctid */
1173 ItemPointerSet(&htup->t_ctid,
1174 BufferGetBlockNumber(buffer),
1175 offnum);
1176 }
1177 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
1179 PageSetLSN(page, lsn);
1180 MarkBufferDirty(buffer);
1181 }
1182 if (BufferIsValid(buffer))
1183 UnlockReleaseBuffer(buffer);
1184}
#define XLH_LOCK_ALL_FROZEN_CLEARED
Definition: heapam_xlog.h:401
static bool HEAP_XMAX_IS_LOCKED_ONLY(uint16 infomask)
Definition: htup_details.h:226
uint8 infobits_set
Definition: heapam_xlog.h:408
OffsetNumber offnum
Definition: heapam_xlog.h:407
TransactionId xmax
Definition: heapam_xlog.h:406
#define VISIBILITYMAP_ALL_FROZEN

References BLK_NEEDS_REDO, BufferGetBlockNumber(), BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), elog, XLogReaderState::EndRecPtr, FirstCommandId, fix_infomask_from_infobits(), xl_heap_lock::flags, FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_IS_LOCKED_ONLY(), HeapTupleHeaderClearHotUpdated(), HeapTupleHeaderSetCmax(), HeapTupleHeaderSetXmax(), xl_heap_lock::infobits_set, InvalidBuffer, ItemIdIsNormal, ItemPointerSet(), MarkBufferDirty(), xl_heap_lock::offnum, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, ReleaseBuffer(), HeapTupleHeaderData::t_ctid, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XLH_LOCK_ALL_FROZEN_CLEARED, XLogReadBufferForRedo(), XLogRecGetBlockTag(), XLogRecGetData, and xl_heap_lock::xmax.

Referenced by heap_redo().

◆ heap_xlog_lock_updated()

static void heap_xlog_lock_updated ( XLogReaderState record)
static

Definition at line 1190 of file heapam_xlog.c.

1191{
1192 XLogRecPtr lsn = record->EndRecPtr;
1193 xl_heap_lock_updated *xlrec;
1194 Buffer buffer;
1195 Page page;
1196 OffsetNumber offnum;
1197 ItemId lp = NULL;
1198 HeapTupleHeader htup;
1199
1200 xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);
1201
1202 /*
1203 * The visibility map may need to be fixed even if the heap page is
1204 * already up-to-date.
1205 */
1207 {
1208 RelFileLocator rlocator;
1209 Buffer vmbuffer = InvalidBuffer;
1210 BlockNumber block;
1211 Relation reln;
1212
1213 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
1214 reln = CreateFakeRelcacheEntry(rlocator);
1215
1216 visibilitymap_pin(reln, block, &vmbuffer);
1217 visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
1218
1219 ReleaseBuffer(vmbuffer);
1221 }
1222
1223 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1224 {
1225 page = BufferGetPage(buffer);
1226
1227 offnum = xlrec->offnum;
1228 if (PageGetMaxOffsetNumber(page) >= offnum)
1229 lp = PageGetItemId(page, offnum);
1230
1231 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
1232 elog(PANIC, "invalid lp");
1233
1234 htup = (HeapTupleHeader) PageGetItem(page, lp);
1235
1236 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
1237 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
1239 &htup->t_infomask2);
1240 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
1241
1242 PageSetLSN(page, lsn);
1243 MarkBufferDirty(buffer);
1244 }
1245 if (BufferIsValid(buffer))
1246 UnlockReleaseBuffer(buffer);
1247}
TransactionId xmax
Definition: heapam_xlog.h:417
OffsetNumber offnum
Definition: heapam_xlog.h:418

References BLK_NEEDS_REDO, BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), elog, XLogReaderState::EndRecPtr, fix_infomask_from_infobits(), xl_heap_lock_updated::flags, FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHeaderSetXmax(), xl_heap_lock_updated::infobits_set, InvalidBuffer, ItemIdIsNormal, MarkBufferDirty(), xl_heap_lock_updated::offnum, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageSetLSN(), PANIC, ReleaseBuffer(), HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XLH_LOCK_ALL_FROZEN_CLEARED, XLogReadBufferForRedo(), XLogRecGetBlockTag(), XLogRecGetData, and xl_heap_lock_updated::xmax.

Referenced by heap2_redo().

◆ heap_xlog_multi_insert()

static void heap_xlog_multi_insert ( XLogReaderState record)
static

Definition at line 607 of file heapam_xlog.c.

608{
609 XLogRecPtr lsn = record->EndRecPtr;
611 RelFileLocator rlocator;
612 BlockNumber blkno;
613 Buffer buffer;
614 Page page;
615 union
616 {
619 } tbuf;
620 HeapTupleHeader htup;
621 uint32 newlen;
622 Size freespace = 0;
623 int i;
624 bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
626 Buffer vmbuffer = InvalidBuffer;
627
628 /*
629 * Insertion doesn't overwrite MVCC data, so no conflict processing is
630 * required.
631 */
632 xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
633
634 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
635
636 /* check that the mutually exclusive flags are not both set */
638 (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
639
640 /*
641 * The visibility map may need to be fixed even if the heap page is
642 * already up-to-date.
643 */
645 {
646 Relation reln = CreateFakeRelcacheEntry(rlocator);
647
648 visibilitymap_pin(reln, blkno, &vmbuffer);
649 visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
650 ReleaseBuffer(vmbuffer);
651 vmbuffer = InvalidBuffer;
653 }
654
655 if (isinit)
656 {
657 buffer = XLogInitBufferForRedo(record, 0);
658 page = BufferGetPage(buffer);
659 PageInit(page, BufferGetPageSize(buffer), 0);
661 }
662 else
663 action = XLogReadBufferForRedo(record, 0, &buffer);
664 if (action == BLK_NEEDS_REDO)
665 {
666 char *tupdata;
667 char *endptr;
668 Size len;
669
670 /* Tuples are stored as block data */
671 tupdata = XLogRecGetBlockData(record, 0, &len);
672 endptr = tupdata + len;
673
674 page = BufferGetPage(buffer);
675
676 for (i = 0; i < xlrec->ntuples; i++)
677 {
678 OffsetNumber offnum;
680
681 /*
682 * If we're reinitializing the page, the tuples are stored in
683 * order from FirstOffsetNumber. Otherwise there's an array of
684 * offsets in the WAL record, and the tuples come after that.
685 */
686 if (isinit)
687 offnum = FirstOffsetNumber + i;
688 else
689 offnum = xlrec->offsets[i];
690 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
691 elog(PANIC, "invalid max offset number");
692
693 xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
694 tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
695
696 newlen = xlhdr->datalen;
697 Assert(newlen <= MaxHeapTupleSize);
698 htup = &tbuf.hdr;
700 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
701 memcpy((char *) htup + SizeofHeapTupleHeader,
702 tupdata,
703 newlen);
704 tupdata += newlen;
705
706 newlen += SizeofHeapTupleHeader;
707 htup->t_infomask2 = xlhdr->t_infomask2;
708 htup->t_infomask = xlhdr->t_infomask;
709 htup->t_hoff = xlhdr->t_hoff;
712 ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
713 ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
714
715 offnum = PageAddItem(page, htup, newlen, offnum, true, true);
716 if (offnum == InvalidOffsetNumber)
717 elog(PANIC, "failed to add tuple");
718 }
719 if (tupdata != endptr)
720 elog(PANIC, "total tuple length mismatch");
721
722 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
723
724 PageSetLSN(page, lsn);
725
728
729 /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
730 if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
731 PageSetAllVisible(page);
732
733 MarkBufferDirty(buffer);
734 }
735 if (BufferIsValid(buffer))
736 UnlockReleaseBuffer(buffer);
737
738 buffer = InvalidBuffer;
739
740 /*
741 * Read and update the visibility map (VM) block.
742 *
743 * We must always redo VM changes, even if the corresponding heap page
744 * update was skipped due to the LSN interlock. Each VM block covers
745 * multiple heap pages, so later WAL records may update other bits in the
746 * same block. If this record includes an FPI (full-page image),
747 * subsequent WAL records may depend on it to guard against torn pages.
748 *
749 * Heap page changes are replayed first to preserve the invariant:
750 * PD_ALL_VISIBLE must be set on the heap page if the VM bit is set.
751 *
752 * Note that we released the heap page lock above. During normal
 753 * operation, this would be unsafe -- a concurrent modification could
754 * clear PD_ALL_VISIBLE while the VM bit remained set, violating the
755 * invariant.
756 *
757 * During recovery, however, no concurrent writers exist. Therefore,
758 * updating the VM without holding the heap page lock is safe enough. This
759 * same approach is taken when replaying xl_heap_visible records (see
760 * heap_xlog_visible()).
761 */
762 if ((xlrec->flags & XLH_INSERT_ALL_FROZEN_SET) &&
764 &vmbuffer) == BLK_NEEDS_REDO)
765 {
766 Page vmpage = BufferGetPage(vmbuffer);
767
768 /* initialize the page if it was read as zeros */
769 if (PageIsNew(vmpage))
770 PageInit(vmpage, BLCKSZ, 0);
771
773 vmbuffer,
776 rlocator);
777
778 Assert(BufferIsDirty(vmbuffer));
779 PageSetLSN(vmpage, lsn);
780 }
781
782 if (BufferIsValid(vmbuffer))
783 UnlockReleaseBuffer(vmbuffer);
784
785 /*
786 * If the page is running low on free space, update the FSM as well.
787 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
788 * better than that without knowing the fill-factor for the table.
789 *
790 * XXX: Don't do this if the page was restored from full page image. We
791 * don't bother to update the FSM in that case, it doesn't need to be
792 * totally accurate anyway.
793 */
794 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
795 XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
796}
bool BufferIsDirty(Buffer buffer)
Definition: bufmgr.c:2905
@ RBM_ZERO_ON_ERROR
Definition: bufmgr.h:51
static bool PageIsNew(const PageData *page)
Definition: bufpage.h:233
static void PageSetAllVisible(Page page)
Definition: bufpage.h:433
#define SHORTALIGN(LEN)
Definition: c.h:810
#define SizeOfMultiInsertTuple
Definition: heapam_xlog.h:199
int i
Definition: isn.c:77
#define FirstOffsetNumber
Definition: off.h:27
OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER]
Definition: heapam_xlog.h:185
uint8 visibilitymap_set_vmbits(BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)
#define VISIBILITYMAP_ALL_VISIBLE
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition: xlogutils.c:340

References generate_unaccent_rules::action, Assert(), BLK_NEEDS_REDO, BufferGetPage(), BufferGetPageSize(), BufferIsDirty(), BufferIsValid(), CreateFakeRelcacheEntry(), data, xl_multi_insert_tuple::datalen, elog, XLogReaderState::EndRecPtr, FirstCommandId, FirstOffsetNumber, xl_heap_multi_insert::flags, FreeFakeRelcacheEntry(), HeapTupleHeaderSetCmin(), HeapTupleHeaderSetXmin(), i, InvalidBuffer, InvalidOffsetNumber, ItemPointerSetBlockNumber(), ItemPointerSetOffsetNumber(), len, MarkBufferDirty(), MaxHeapTupleSize, MemSet, xl_heap_multi_insert::ntuples, xl_heap_multi_insert::offsets, PageAddItem, PageClearAllVisible(), PageGetHeapFreeSpace(), PageGetMaxOffsetNumber(), PageInit(), PageIsNew(), PageSetAllVisible(), PageSetLSN(), PANIC, RBM_ZERO_ON_ERROR, ReleaseBuffer(), SHORTALIGN, SizeofHeapTupleHeader, SizeOfMultiInsertTuple, HeapTupleHeaderData::t_ctid, xl_multi_insert_tuple::t_hoff, HeapTupleHeaderData::t_hoff, xl_multi_insert_tuple::t_infomask, HeapTupleHeaderData::t_infomask, xl_multi_insert_tuple::t_infomask2, HeapTupleHeaderData::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_pin(), visibilitymap_set_vmbits(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_FROZEN_SET, XLH_INSERT_ALL_VISIBLE_CLEARED, XLOG_HEAP_INIT_PAGE, XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecGetInfo, XLogRecGetXid, and XLogRecordPageWithFreeSpace().

Referenced by heap2_redo().

◆ heap_xlog_prune_freeze()

static void heap_xlog_prune_freeze ( XLogReaderState record)
static

Definition at line 30 of file heapam_xlog.c.

31{
32 XLogRecPtr lsn = record->EndRecPtr;
33 char *maindataptr = XLogRecGetData(record);
34 xl_heap_prune xlrec;
35 Buffer buffer;
36 RelFileLocator rlocator;
37 BlockNumber blkno;
38 Buffer vmbuffer = InvalidBuffer;
39 uint8 vmflags = 0;
40 Size freespace = 0;
41
42 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
43 memcpy(&xlrec, maindataptr, SizeOfHeapPrune);
44 maindataptr += SizeOfHeapPrune;
45
46 /*
47 * We will take an ordinary exclusive lock or a cleanup lock depending on
48 * whether the XLHP_CLEANUP_LOCK flag is set. With an ordinary exclusive
49 * lock, we better not be doing anything that requires moving existing
50 * tuple data.
51 */
52 Assert((xlrec.flags & XLHP_CLEANUP_LOCK) != 0 ||
54
55 if (xlrec.flags & XLHP_VM_ALL_VISIBLE)
56 {
58 if (xlrec.flags & XLHP_VM_ALL_FROZEN)
59 vmflags |= VISIBILITYMAP_ALL_FROZEN;
60 }
61
62 /*
63 * After xl_heap_prune is the optional snapshot conflict horizon.
64 *
65 * In Hot Standby mode, we must ensure that there are no running queries
66 * which would conflict with the changes in this record. That means we
67 * can't replay this record if it removes tuples that are still visible to
68 * transactions on the standby, freeze tuples with xids that are still
69 * considered running on the standby, or set a page as all-visible in the
70 * VM if it isn't all-visible to all transactions on the standby.
71 */
72 if ((xlrec.flags & XLHP_HAS_CONFLICT_HORIZON) != 0)
73 {
74 TransactionId snapshot_conflict_horizon;
75
76 /* memcpy() because snapshot_conflict_horizon is stored unaligned */
77 memcpy(&snapshot_conflict_horizon, maindataptr, sizeof(TransactionId));
78 maindataptr += sizeof(TransactionId);
79
80 if (InHotStandby)
81 ResolveRecoveryConflictWithSnapshot(snapshot_conflict_horizon,
82 (xlrec.flags & XLHP_IS_CATALOG_REL) != 0,
83 rlocator);
84 }
85
86 /*
87 * If we have a full-page image of the heap block, restore it and we're
88 * done with the heap block.
89 */
91 (xlrec.flags & XLHP_CLEANUP_LOCK) != 0,
92 &buffer) == BLK_NEEDS_REDO)
93 {
94 Page page = BufferGetPage(buffer);
95 OffsetNumber *redirected;
96 OffsetNumber *nowdead;
97 OffsetNumber *nowunused;
98 int nredirected;
99 int ndead;
100 int nunused;
101 int nplans;
102 Size datalen;
103 xlhp_freeze_plan *plans;
104 OffsetNumber *frz_offsets;
105 char *dataptr = XLogRecGetBlockData(record, 0, &datalen);
106 bool do_prune;
107
109 &nplans, &plans, &frz_offsets,
110 &nredirected, &redirected,
111 &ndead, &nowdead,
112 &nunused, &nowunused);
113
114 do_prune = nredirected > 0 || ndead > 0 || nunused > 0;
115
116 /* Ensure the record does something */
117 Assert(do_prune || nplans > 0 || vmflags & VISIBILITYMAP_VALID_BITS);
118
119 /*
120 * Update all line pointers per the record, and repair fragmentation
121 * if needed.
122 */
123 if (do_prune)
125 (xlrec.flags & XLHP_CLEANUP_LOCK) == 0,
126 redirected, nredirected,
127 nowdead, ndead,
128 nowunused, nunused);
129
130 /* Freeze tuples */
131 for (int p = 0; p < nplans; p++)
132 {
133 HeapTupleFreeze frz;
134
135 /*
136 * Convert freeze plan representation from WAL record into
137 * per-tuple format used by heap_execute_freeze_tuple
138 */
139 frz.xmax = plans[p].xmax;
140 frz.t_infomask2 = plans[p].t_infomask2;
141 frz.t_infomask = plans[p].t_infomask;
142 frz.frzflags = plans[p].frzflags;
143 frz.offset = InvalidOffsetNumber; /* unused, but be tidy */
144
145 for (int i = 0; i < plans[p].ntuples; i++)
146 {
147 OffsetNumber offset = *(frz_offsets++);
148 ItemId lp;
149 HeapTupleHeader tuple;
150
151 lp = PageGetItemId(page, offset);
152 tuple = (HeapTupleHeader) PageGetItem(page, lp);
153 heap_execute_freeze_tuple(tuple, &frz);
154 }
155 }
156
157 /* There should be no more data */
158 Assert((char *) frz_offsets == dataptr + datalen);
159
160 if (vmflags & VISIBILITYMAP_VALID_BITS)
161 PageSetAllVisible(page);
162
163 MarkBufferDirty(buffer);
164
165 /*
166 * See log_heap_prune_and_freeze() for commentary on when we set the
167 * heap page LSN.
168 */
169 if (do_prune || nplans > 0 ||
171 PageSetLSN(page, lsn);
172
173 /*
174 * Note: we don't worry about updating the page's prunability hints.
175 * At worst this will cause an extra prune cycle to occur soon.
176 */
177 }
178
179 /*
180 * If we 1) released any space or line pointers or 2) set PD_ALL_VISIBLE
181 * or the VM, update the freespace map.
182 *
183 * Even when no actual space is freed (when only marking the page
184 * all-visible or frozen), we still update the FSM. Because the FSM is
185 * unlogged and maintained heuristically, it often becomes stale on
186 * standbys. If such a standby is later promoted and runs VACUUM, it will
187 * skip recalculating free space for pages that were marked
 189 * all-visible/all-frozen. FreeSpaceMapVacuum() can then propagate overly
189 * optimistic free space values upward, causing future insertions to
190 * select pages that turn out to be unusable. In bulk, this can lead to
191 * long stalls.
192 *
193 * To prevent this, always update the FSM even when only marking a page
194 * all-visible/all-frozen.
195 *
196 * Do this regardless of whether a full-page image is logged, since FSM
197 * data is not part of the page itself.
198 */
199 if (BufferIsValid(buffer))
200 {
201 if ((xlrec.flags & (XLHP_HAS_REDIRECTIONS |
204 (vmflags & VISIBILITYMAP_VALID_BITS))
205 freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
206
207 /*
208 * We want to avoid holding an exclusive lock on the heap buffer while
209 * doing IO (either of the FSM or the VM), so we'll release it now.
210 */
211 UnlockReleaseBuffer(buffer);
212 }
213
214 /*
215 * Now read and update the VM block.
216 *
217 * We must redo changes to the VM even if the heap page was skipped due to
218 * LSN interlock. See comment in heap_xlog_multi_insert() for more details
219 * on replaying changes to the VM.
220 */
221 if ((vmflags & VISIBILITYMAP_VALID_BITS) &&
224 false,
225 &vmbuffer) == BLK_NEEDS_REDO)
226 {
227 Page vmpage = BufferGetPage(vmbuffer);
228
229 /* initialize the page if it was read as zeros */
230 if (PageIsNew(vmpage))
231 PageInit(vmpage, BLCKSZ, 0);
232
233 visibilitymap_set_vmbits(blkno, vmbuffer, vmflags, rlocator);
234
235 Assert(BufferIsDirty(vmbuffer));
236 PageSetLSN(vmpage, lsn);
237 }
238
239 if (BufferIsValid(vmbuffer))
240 UnlockReleaseBuffer(vmbuffer);
241
242 if (freespace > 0)
243 XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
244}
@ RBM_NORMAL
Definition: bufmgr.h:46
uint32 TransactionId
Definition: c.h:661
static void heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
Definition: heapam.h:436
#define XLHP_HAS_CONFLICT_HORIZON
Definition: heapam_xlog.h:316
#define XLHP_VM_ALL_VISIBLE
Definition: heapam_xlog.h:339
#define SizeOfHeapPrune
Definition: heapam_xlog.h:295
#define XLHP_HAS_NOW_UNUSED_ITEMS
Definition: heapam_xlog.h:331
#define XLHP_VM_ALL_FROZEN
Definition: heapam_xlog.h:340
#define XLHP_HAS_REDIRECTIONS
Definition: heapam_xlog.h:329
#define XLHP_CLEANUP_LOCK
Definition: heapam_xlog.h:308
#define XLHP_HAS_DEAD_ITEMS
Definition: heapam_xlog.h:330
#define XLHP_IS_CATALOG_REL
Definition: heapam_xlog.h:298
void heap_xlog_deserialize_prune_and_freeze(char *cursor, uint16 flags, int *nplans, xlhp_freeze_plan **plans, OffsetNumber **frz_offsets, int *nredirected, OffsetNumber **redirected, int *ndead, OffsetNumber **nowdead, int *nunused, OffsetNumber **nowunused)
Definition: heapdesc.c:106
void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
Definition: pruneheap.c:1605
void ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon, bool isCatalogRel, RelFileLocator locator)
Definition: standby.c:468
uint8 frzflags
Definition: heapam.h:147
uint16 t_infomask2
Definition: heapam.h:145
TransactionId xmax
Definition: heapam.h:144
OffsetNumber offset
Definition: heapam.h:152
uint16 t_infomask
Definition: heapam.h:146
TransactionId xmax
Definition: heapam_xlog.h:352
#define XLogHintBitIsNeeded()
Definition: xlog.h:120
#define InHotStandby
Definition: xlogutils.h:60

References Assert(), BLK_NEEDS_REDO, BufferGetPage(), BufferIsDirty(), BufferIsValid(), XLogReaderState::EndRecPtr, xl_heap_prune::flags, HeapTupleFreeze::frzflags, xlhp_freeze_plan::frzflags, heap_execute_freeze_tuple(), heap_page_prune_execute(), heap_xlog_deserialize_prune_and_freeze(), i, InHotStandby, InvalidBuffer, InvalidOffsetNumber, MarkBufferDirty(), xlhp_freeze_plan::ntuples, HeapTupleFreeze::offset, PageGetHeapFreeSpace(), PageGetItem(), PageGetItemId(), PageInit(), PageIsNew(), PageSetAllVisible(), PageSetLSN(), RBM_NORMAL, RBM_ZERO_ON_ERROR, ResolveRecoveryConflictWithSnapshot(), SizeOfHeapPrune, HeapTupleFreeze::t_infomask, xlhp_freeze_plan::t_infomask, HeapTupleFreeze::t_infomask2, xlhp_freeze_plan::t_infomask2, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_set_vmbits(), VISIBILITYMAP_VALID_BITS, XLHP_CLEANUP_LOCK, XLHP_HAS_CONFLICT_HORIZON, XLHP_HAS_DEAD_ITEMS, XLHP_HAS_NOW_UNUSED_ITEMS, XLHP_HAS_REDIRECTIONS, XLHP_IS_CATALOG_REL, XLHP_VM_ALL_FROZEN, XLHP_VM_ALL_VISIBLE, XLogHintBitIsNeeded, XLogReadBufferForRedoExtended(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetData, XLogRecordPageWithFreeSpace(), HeapTupleFreeze::xmax, and xlhp_freeze_plan::xmax.

Referenced by heap2_redo().

◆ heap_xlog_update()

static void heap_xlog_update ( XLogReaderState record,
bool  hot_update 
)
static

Definition at line 802 of file heapam_xlog.c.

803{
804 XLogRecPtr lsn = record->EndRecPtr;
805 xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
806 RelFileLocator rlocator;
807 BlockNumber oldblk;
808 BlockNumber newblk;
809 ItemPointerData newtid;
810 Buffer obuffer,
811 nbuffer;
812 Page page;
813 OffsetNumber offnum;
814 ItemId lp = NULL;
815 HeapTupleData oldtup;
816 HeapTupleHeader htup;
817 uint16 prefixlen = 0,
818 suffixlen = 0;
819 char *newp;
820 union
821 {
824 } tbuf;
825 xl_heap_header xlhdr;
826 uint32 newlen;
827 Size freespace = 0;
828 XLogRedoAction oldaction;
829 XLogRedoAction newaction;
830
831 /* initialize to keep the compiler quiet */
832 oldtup.t_data = NULL;
833 oldtup.t_len = 0;
834
835 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
836 if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
837 {
838 /* HOT updates are never done across pages */
839 Assert(!hot_update);
840 }
841 else
842 oldblk = newblk;
843
844 ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
845
846 /*
847 * The visibility map may need to be fixed even if the heap page is
848 * already up-to-date.
849 */
851 {
852 Relation reln = CreateFakeRelcacheEntry(rlocator);
853 Buffer vmbuffer = InvalidBuffer;
854
855 visibilitymap_pin(reln, oldblk, &vmbuffer);
856 visibilitymap_clear(reln, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
857 ReleaseBuffer(vmbuffer);
859 }
860
861 /*
862 * In normal operation, it is important to lock the two pages in
863 * page-number order, to avoid possible deadlocks against other update
864 * operations going the other way. However, during WAL replay there can
865 * be no other update happening, so we don't need to worry about that. But
866 * we *do* need to worry that we don't expose an inconsistent state to Hot
867 * Standby queries --- so the original page can't be unlocked before we've
868 * added the new tuple to the new page.
869 */
870
871 /* Deal with old tuple version */
872 oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
873 &obuffer);
874 if (oldaction == BLK_NEEDS_REDO)
875 {
876 page = BufferGetPage(obuffer);
877 offnum = xlrec->old_offnum;
878 if (PageGetMaxOffsetNumber(page) >= offnum)
879 lp = PageGetItemId(page, offnum);
880
881 if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
882 elog(PANIC, "invalid lp");
883
884 htup = (HeapTupleHeader) PageGetItem(page, lp);
885
886 oldtup.t_data = htup;
887 oldtup.t_len = ItemIdGetLength(lp);
888
890 htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
891 if (hot_update)
893 else
896 &htup->t_infomask2);
897 HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
899 /* Set forward chain link in t_ctid */
900 htup->t_ctid = newtid;
901
902 /* Mark the page as a candidate for pruning */
903 PageSetPrunable(page, XLogRecGetXid(record));
904
907
908 PageSetLSN(page, lsn);
909 MarkBufferDirty(obuffer);
910 }
911
912 /*
913 * Read the page the new tuple goes into, if different from old.
914 */
915 if (oldblk == newblk)
916 {
917 nbuffer = obuffer;
918 newaction = oldaction;
919 }
920 else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
921 {
922 nbuffer = XLogInitBufferForRedo(record, 0);
923 page = BufferGetPage(nbuffer);
924 PageInit(page, BufferGetPageSize(nbuffer), 0);
925 newaction = BLK_NEEDS_REDO;
926 }
927 else
928 newaction = XLogReadBufferForRedo(record, 0, &nbuffer);
929
930 /*
931 * The visibility map may need to be fixed even if the heap page is
932 * already up-to-date.
933 */
935 {
936 Relation reln = CreateFakeRelcacheEntry(rlocator);
937 Buffer vmbuffer = InvalidBuffer;
938
939 visibilitymap_pin(reln, newblk, &vmbuffer);
940 visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
941 ReleaseBuffer(vmbuffer);
943 }
944
945 /* Deal with new tuple */
946 if (newaction == BLK_NEEDS_REDO)
947 {
948 char *recdata;
949 char *recdata_end;
950 Size datalen;
951 Size tuplen;
952
953 recdata = XLogRecGetBlockData(record, 0, &datalen);
954 recdata_end = recdata + datalen;
955
956 page = BufferGetPage(nbuffer);
957
958 offnum = xlrec->new_offnum;
959 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
960 elog(PANIC, "invalid max offset number");
961
963 {
964 Assert(newblk == oldblk);
965 memcpy(&prefixlen, recdata, sizeof(uint16));
966 recdata += sizeof(uint16);
967 }
969 {
970 Assert(newblk == oldblk);
971 memcpy(&suffixlen, recdata, sizeof(uint16));
972 recdata += sizeof(uint16);
973 }
974
975 memcpy(&xlhdr, recdata, SizeOfHeapHeader);
976 recdata += SizeOfHeapHeader;
977
978 tuplen = recdata_end - recdata;
979 Assert(tuplen <= MaxHeapTupleSize);
980
981 htup = &tbuf.hdr;
983
984 /*
985 * Reconstruct the new tuple using the prefix and/or suffix from the
986 * old tuple, and the data stored in the WAL record.
987 */
988 newp = (char *) htup + SizeofHeapTupleHeader;
989 if (prefixlen > 0)
990 {
991 int len;
992
993 /* copy bitmap [+ padding] [+ oid] from WAL record */
995 memcpy(newp, recdata, len);
996 recdata += len;
997 newp += len;
998
999 /* copy prefix from old tuple */
1000 memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
1001 newp += prefixlen;
1002
1003 /* copy new tuple data from WAL record */
1004 len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
1005 memcpy(newp, recdata, len);
1006 recdata += len;
1007 newp += len;
1008 }
1009 else
1010 {
1011 /*
1012 * copy bitmap [+ padding] [+ oid] + data from record, all in one
1013 * go
1014 */
1015 memcpy(newp, recdata, tuplen);
1016 recdata += tuplen;
1017 newp += tuplen;
1018 }
1019 Assert(recdata == recdata_end);
1020
1021 /* copy suffix from old tuple */
1022 if (suffixlen > 0)
1023 memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
1024
1025 newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
1026 htup->t_infomask2 = xlhdr.t_infomask2;
1027 htup->t_infomask = xlhdr.t_infomask;
1028 htup->t_hoff = xlhdr.t_hoff;
1029
1032 HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
1033 /* Make sure there is no forward chain link in t_ctid */
1034 htup->t_ctid = newtid;
1035
1036 offnum = PageAddItem(page, htup, newlen, offnum, true, true);
1037 if (offnum == InvalidOffsetNumber)
1038 elog(PANIC, "failed to add tuple");
1039
1041 PageClearAllVisible(page);
1042
1043 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
1044
1045 PageSetLSN(page, lsn);
1046 MarkBufferDirty(nbuffer);
1047 }
1048
1049 if (BufferIsValid(nbuffer) && nbuffer != obuffer)
1050 UnlockReleaseBuffer(nbuffer);
1051 if (BufferIsValid(obuffer))
1052 UnlockReleaseBuffer(obuffer);
1053
1054 /*
1055 * If the new page is running low on free space, update the FSM as well.
1056 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
1057 * better than that without knowing the fill-factor for the table.
1058 *
1059 * However, don't update the FSM on HOT updates, because after crash
1060 * recovery, either the old or the new tuple will certainly be dead and
1061 * prunable. After pruning, the page will have roughly as much free space
1062 * as it did before the update, assuming the new tuple is about the same
1063 * size as the old one.
1064 *
1065 * XXX: Don't do this if the page was restored from full page image. We
1066 * don't bother to update the FSM in that case, it doesn't need to be
1067 * totally accurate anyway.
1068 */
1069 if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
1070 XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
1071}
uint16_t uint16
Definition: c.h:541
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:87
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:85
#define XLH_UPDATE_SUFFIX_FROM_OLD
Definition: heapam_xlog.h:92
#define XLH_UPDATE_PREFIX_FROM_OLD
Definition: heapam_xlog.h:91
static void HeapTupleHeaderSetHotUpdated(HeapTupleHeaderData *tup)
Definition: htup_details.h:543
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
TransactionId new_xmax
Definition: heapam_xlog.h:224
uint8 old_infobits_set
Definition: heapam_xlog.h:222
TransactionId old_xmax
Definition: heapam_xlog.h:220
OffsetNumber old_offnum
Definition: heapam_xlog.h:221
OffsetNumber new_offnum
Definition: heapam_xlog.h:225
bool XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum, Buffer *prefetch_buffer)
Definition: xlogreader.c:2017

References Assert(), BLK_NEEDS_REDO, BufferGetPage(), BufferGetPageSize(), BufferIsValid(), CreateFakeRelcacheEntry(), data, elog, XLogReaderState::EndRecPtr, FirstCommandId, fix_infomask_from_infobits(), xl_heap_update::flags, FreeFakeRelcacheEntry(), HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHeaderClearHotUpdated(), HeapTupleHeaderSetCmax(), HeapTupleHeaderSetCmin(), HeapTupleHeaderSetHotUpdated(), HeapTupleHeaderSetXmax(), HeapTupleHeaderSetXmin(), InvalidBuffer, InvalidOffsetNumber, ItemIdGetLength, ItemIdIsNormal, ItemPointerSet(), len, MarkBufferDirty(), MaxHeapTupleSize, MemSet, xl_heap_update::new_offnum, xl_heap_update::new_xmax, xl_heap_update::old_infobits_set, xl_heap_update::old_offnum, xl_heap_update::old_xmax, PageAddItem, PageClearAllVisible(), PageGetHeapFreeSpace(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageInit(), PageSetLSN(), PageSetPrunable, PANIC, ReleaseBuffer(), SizeOfHeapHeader, SizeofHeapTupleHeader, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, UnlockReleaseBuffer(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED, XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED, XLH_UPDATE_PREFIX_FROM_OLD, XLH_UPDATE_SUFFIX_FROM_OLD, XLOG_HEAP_INIT_PAGE, XLogInitBufferForRedo(), XLogReadBufferForRedo(), XLogRecGetBlockData(), XLogRecGetBlockTag(), XLogRecGetBlockTagExtended(), XLogRecGetData, XLogRecGetInfo, XLogRecGetXid, and XLogRecordPageWithFreeSpace().

Referenced by heap_redo().

◆ heap_xlog_visible()

static void heap_xlog_visible ( XLogReaderState record)
static

Definition at line 255 of file heapam_xlog.c.

256{
257 XLogRecPtr lsn = record->EndRecPtr;
259 Buffer vmbuffer = InvalidBuffer;
260 Buffer buffer;
261 Page page;
262 RelFileLocator rlocator;
263 BlockNumber blkno;
265
266 Assert((xlrec->flags & VISIBILITYMAP_XLOG_VALID_BITS) == xlrec->flags);
267
268 XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);
269
270 /*
271 * If there are any Hot Standby transactions running that have an xmin
272 * horizon old enough that this page isn't all-visible for them, they
273 * might incorrectly decide that an index-only scan can skip a heap fetch.
274 *
275 * NB: It might be better to throw some kind of "soft" conflict here that
276 * forces any index-only scan that is in flight to perform heap fetches,
277 * rather than killing the transaction outright.
278 */
279 if (InHotStandby)
282 rlocator);
283
284 /*
285 * Read the heap page, if it still exists. If the heap file has dropped or
286 * truncated later in recovery, we don't need to update the page, but we'd
287 * better still update the visibility map.
288 */
289 action = XLogReadBufferForRedo(record, 1, &buffer);
290 if (action == BLK_NEEDS_REDO)
291 {
292 /*
293 * We don't bump the LSN of the heap page when setting the visibility
 294 * map bit (unless checksums or wal_log_hints is enabled, in which
 295 * case we must). This exposes us to torn page hazards, but since
296 * we're not inspecting the existing page contents in any way, we
297 * don't care.
298 */
299 page = BufferGetPage(buffer);
300
301 PageSetAllVisible(page);
302
304 PageSetLSN(page, lsn);
305
306 MarkBufferDirty(buffer);
307 }
308 else if (action == BLK_RESTORED)
309 {
310 /*
311 * If heap block was backed up, we already restored it and there's
312 * nothing more to do. (This can only happen with checksums or
313 * wal_log_hints enabled.)
314 */
315 }
316
317 if (BufferIsValid(buffer))
318 {
319 Size space = PageGetFreeSpace(BufferGetPage(buffer));
320
321 UnlockReleaseBuffer(buffer);
322
323 /*
324 * Since FSM is not WAL-logged and only updated heuristically, it
325 * easily becomes stale in standbys. If the standby is later promoted
326 * and runs VACUUM, it will skip updating individual free space
327 * figures for pages that became all-visible (or all-frozen, depending
 328 * on the vacuum mode), which is troublesome when FreeSpaceMapVacuum
329 * propagates too optimistic free space values to upper FSM layers;
330 * later inserters try to use such pages only to find out that they
331 * are unusable. This can cause long stalls when there are many such
332 * pages.
333 *
334 * Forestall those problems by updating FSM's idea about a page that
335 * is becoming all-visible or all-frozen.
336 *
337 * Do this regardless of a full-page image being applied, since the
338 * FSM data is not in the page anyway.
339 */
340 if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
341 XLogRecordPageWithFreeSpace(rlocator, blkno, space);
342 }
343
344 /*
345 * Even if we skipped the heap page update due to the LSN interlock, it's
346 * still safe to update the visibility map. Any WAL record that clears
347 * the visibility map bit does so before checking the page LSN, so any
348 * bits that need to be cleared will still be cleared.
349 */
351 &vmbuffer) == BLK_NEEDS_REDO)
352 {
353 Page vmpage = BufferGetPage(vmbuffer);
354 Relation reln;
355 uint8 vmbits;
356
357 /* initialize the page if it was read as zeros */
358 if (PageIsNew(vmpage))
359 PageInit(vmpage, BLCKSZ, 0);
360
361 /* remove VISIBILITYMAP_XLOG_* */
362 vmbits = xlrec->flags & VISIBILITYMAP_VALID_BITS;
363
364 /*
365 * XLogReadBufferForRedoExtended locked the buffer. But
366 * visibilitymap_set will handle locking itself.
367 */
369
370 reln = CreateFakeRelcacheEntry(rlocator);
371
372 visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
373 xlrec->snapshotConflictHorizon, vmbits);
374
375 ReleaseBuffer(vmbuffer);
377 }
378 else if (BufferIsValid(vmbuffer))
379 UnlockReleaseBuffer(vmbuffer);
380}
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5605
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:203
Size PageGetFreeSpace(const PageData *page)
Definition: bufpage.c:906
TransactionId snapshotConflictHorizon
Definition: heapam_xlog.h:454
uint8 visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
#define VISIBILITYMAP_XLOG_VALID_BITS
#define VISIBILITYMAP_XLOG_CATALOG_REL
@ BLK_RESTORED
Definition: xlogutils.h:76

References generate_unaccent_rules::action, Assert(), BLK_NEEDS_REDO, BLK_RESTORED, BUFFER_LOCK_UNLOCK, BufferGetPage(), BufferIsValid(), CreateFakeRelcacheEntry(), XLogReaderState::EndRecPtr, xl_heap_visible::flags, FreeFakeRelcacheEntry(), InHotStandby, InvalidBuffer, LockBuffer(), MarkBufferDirty(), PageGetFreeSpace(), PageInit(), PageIsNew(), PageSetAllVisible(), PageSetLSN(), RBM_ZERO_ON_ERROR, ReleaseBuffer(), ResolveRecoveryConflictWithSnapshot(), xl_heap_visible::snapshotConflictHorizon, UnlockReleaseBuffer(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, VISIBILITYMAP_XLOG_CATALOG_REL, VISIBILITYMAP_XLOG_VALID_BITS, XLogHintBitIsNeeded, XLogReadBufferForRedo(), XLogReadBufferForRedoExtended(), XLogRecGetBlockTag(), XLogRecGetData, and XLogRecordPageWithFreeSpace().

Referenced by heap2_redo().