diff options
-rw-r--r--  src/backend/access/gin/ginxlog.c           |  20
-rw-r--r--  src/backend/access/gist/gist.c             |  14
-rw-r--r--  src/backend/access/gist/gistget.c          |   2
-rw-r--r--  src/backend/access/gist/gistvacuum.c       |   2
-rw-r--r--  src/backend/access/gist/gistxlog.c         |   4
-rw-r--r--  src/backend/access/heap/heapam.c           |  22
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c        |  16
-rw-r--r--  src/backend/access/spgist/spgxlog.c        |  36
-rw-r--r--  src/backend/access/transam/clog.c          |   2
-rw-r--r--  src/backend/access/transam/slru.c          |   2
-rw-r--r--  src/backend/access/transam/timeline.c      |   4
-rw-r--r--  src/backend/access/transam/twophase.c      |   2
-rw-r--r--  src/backend/access/transam/xlog.c          | 144
-rw-r--r--  src/backend/commands/sequence.c            |   2
-rw-r--r--  src/backend/replication/syncrep.c          |  12
-rw-r--r--  src/backend/replication/walreceiver.c      |  12
-rw-r--r--  src/backend/replication/walreceiverfuncs.c |   2
-rw-r--r--  src/backend/replication/walsender.c        |  26
-rw-r--r--  src/bin/pg_basebackup/receivelog.c         |   2
-rw-r--r--  src/include/access/xlogdefs.h              |  14
20 files changed, 163 insertions, 177 deletions
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index 0ff66c875bc..fc1f0a5c471 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -177,7 +177,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record) return; /* page was deleted, nothing to do */ page = (Page) BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { if (data->isData) { @@ -393,7 +393,7 @@ ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record) return; page = (Page) BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { if (GinPageIsData(page)) { @@ -448,7 +448,7 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(dbuffer)) { page = BufferGetPage(dbuffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { Assert(GinPageIsData(page)); GinPageGetOpaque(page)->flags = GIN_DELETED; @@ -467,7 +467,7 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(pbuffer)) { page = BufferGetPage(pbuffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { Assert(GinPageIsData(page)); Assert(!GinPageIsLeaf(page)); @@ -487,7 +487,7 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(lbuffer)) { page = BufferGetPage(lbuffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { Assert(GinPageIsData(page)); GinPageGetOpaque(page)->rightlink = data->rightLink; @@ -518,7 +518,7 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record) return; /* assume index was deleted, nothing to do */ metapage = BufferGetPage(metabuffer); - if (!XLByteLE(lsn, PageGetLSN(metapage))) + if (lsn > PageGetLSN(metapage)) { memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData)); PageSetLSN(metapage, lsn); @@ -540,7 +540,7 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record) { Page page = BufferGetPage(buffer); - if (!XLByteLE(lsn, 
PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { OffsetNumber l, off = (PageIsEmpty(page)) ? FirstOffsetNumber : @@ -590,7 +590,7 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record) { Page page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { GinPageGetOpaque(page)->rightlink = data->newRightlink; @@ -677,7 +677,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record) return; /* assume index was deleted, nothing to do */ metapage = BufferGetPage(metabuffer); - if (!XLByteLE(lsn, PageGetLSN(metapage))) + if (lsn > PageGetLSN(metapage)) { memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData)); PageSetLSN(metapage, lsn); @@ -703,7 +703,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record) { Page page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { GinPageGetOpaque(page)->flags = GIN_DELETED; diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 9c6625bba3a..700e97afc39 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -561,8 +561,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) } if (stack->blkno != GIST_ROOT_BLKNO && - XLByteLT(stack->parent->lsn, - GistPageGetOpaque(stack->page)->nsn)) + stack->parent->lsn < GistPageGetOpaque(stack->page)->nsn) { /* * Concurrent split detected. 
There's no guarantee that the @@ -620,7 +619,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) xlocked = true; stack->page = (Page) BufferGetPage(stack->buffer); - if (!XLByteEQ(PageGetLSN(stack->page), stack->lsn)) + if (PageGetLSN(stack->page) != stack->lsn) { /* the page was changed while we unlocked it, retry */ continue; @@ -708,8 +707,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate) */ } else if (GistFollowRight(stack->page) || - XLByteLT(stack->parent->lsn, - GistPageGetOpaque(stack->page)->nsn)) + stack->parent->lsn < + GistPageGetOpaque(stack->page)->nsn) { /* * The page was split while we momentarily unlocked the @@ -794,7 +793,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum) if (GistFollowRight(page)) elog(ERROR, "concurrent GiST page split was incomplete"); - if (top->parent && XLByteLT(top->parent->lsn, GistPageGetOpaque(page)->nsn) && + if (top->parent && top->parent->lsn < GistPageGetOpaque(page)->nsn && GistPageGetOpaque(page)->rightlink != InvalidBlockNumber /* sanity check */ ) { /* @@ -864,7 +863,8 @@ gistFindCorrectParent(Relation r, GISTInsertStack *child) parent->page = (Page) BufferGetPage(parent->buffer); /* here we don't need to distinguish between split and page update */ - if (child->downlinkoffnum == InvalidOffsetNumber || !XLByteEQ(parent->lsn, PageGetLSN(parent->page))) + if (child->downlinkoffnum == InvalidOffsetNumber || + parent->lsn != PageGetLSN(parent->page)) { /* parent is changed, look child in right links until found */ OffsetNumber i, diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 2253e7c0eb7..0e1fd80280b 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -263,7 +263,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances, */ if (!XLogRecPtrIsInvalid(pageItem->data.parentlsn) && (GistFollowRight(page) || - 
XLByteLT(pageItem->data.parentlsn, opaque->nsn)) && + pageItem->data.parentlsn < opaque->nsn) && opaque->rightlink != InvalidBlockNumber /* sanity check */ ) { /* There was a page split, follow right link to add pages */ diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index f2a7a872662..3fbcc6f52c0 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -114,7 +114,7 @@ pushStackIfSplited(Page page, GistBDItem *stack) GISTPageOpaque opaque = GistPageGetOpaque(page); if (stack->blkno != GIST_ROOT_BLKNO && !XLogRecPtrIsInvalid(stack->parentlsn) && - (GistFollowRight(page) || XLByteLT(stack->parentlsn, opaque->nsn)) && + (GistFollowRight(page) || stack->parentlsn < opaque->nsn) && opaque->rightlink != InvalidBlockNumber /* sanity check */ ) { /* split page detected, install right link to the stack */ diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index f9c8fcbcf59..f802c23f723 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -64,7 +64,7 @@ gistRedoClearFollowRight(XLogRecPtr lsn, XLogRecord *record, int block_index, * of this record, because the updated NSN is not included in the full * page image. 
*/ - if (!XLByteLT(lsn, PageGetLSN(page))) + if (lsn >= PageGetLSN(page)) { GistPageGetOpaque(page)->nsn = lsn; GistClearFollowRight(page); @@ -119,7 +119,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record) page = (Page) BufferGetPage(buffer); /* nothing more to do if change already applied */ - if (XLByteLE(lsn, PageGetLSN(page))) + if (lsn <= PageGetLSN(page)) { UnlockReleaseBuffer(buffer); return; diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 186fb8711b5..ac8407b4aef 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -4700,7 +4700,7 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record) LockBufferForCleanup(buffer); page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) + if (lsn <= PageGetLSN(page)) { UnlockReleaseBuffer(buffer); return; @@ -4770,7 +4770,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record) return; page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) + if (lsn <= PageGetLSN(page)) { UnlockReleaseBuffer(buffer); return; @@ -4854,7 +4854,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record) * XLOG record's LSN, we mustn't mark the page all-visible, because * the subsequent update won't be replayed to clear the flag. */ - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { PageSetAllVisible(page); MarkBufferDirty(buffer); @@ -4891,7 +4891,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record) * we did for the heap page. If this results in a dropped bit, no * real harm is done; and the next VACUUM will fix it. 
*/ - if (!XLByteLE(lsn, PageGetLSN(BufferGetPage(vmbuffer)))) + if (lsn > PageGetLSN(BufferGetPage(vmbuffer))) visibilitymap_set(reln, xlrec->block, lsn, vmbuffer, xlrec->cutoff_xid); @@ -4977,7 +4977,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record) return; page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (lsn <= PageGetLSN(page)) /* changes are applied */ { UnlockReleaseBuffer(buffer); return; @@ -5072,7 +5072,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record) return; page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (lsn <= PageGetLSN(page)) /* changes are applied */ { UnlockReleaseBuffer(buffer); return; @@ -5207,7 +5207,7 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record) return; page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (lsn <= PageGetLSN(page)) /* changes are applied */ { UnlockReleaseBuffer(buffer); return; @@ -5349,7 +5349,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update) goto newt; page = (Page) BufferGetPage(obuffer); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (lsn <= PageGetLSN(page)) /* changes are applied */ { if (samepage) { @@ -5449,7 +5449,7 @@ newt:; return; page = (Page) BufferGetPage(nbuffer); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (lsn <= PageGetLSN(page)) /* changes are applied */ { UnlockReleaseBuffer(nbuffer); if (BufferIsValid(obuffer)) @@ -5549,7 +5549,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record) return; page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */ + if (lsn <= PageGetLSN(page)) /* changes are applied */ { UnlockReleaseBuffer(buffer); return; @@ -5612,7 +5612,7 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record) return; page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, 
PageGetLSN(page))) /* changes are applied */ + if (lsn <= PageGetLSN(page)) /* changes are applied */ { UnlockReleaseBuffer(buffer); return; diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index 9f850ab05ff..c91408d214e 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -229,7 +229,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, { page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) + if (lsn <= PageGetLSN(page)) { UnlockReleaseBuffer(buffer); } @@ -381,7 +381,7 @@ btree_xlog_split(bool onleft, bool isroot, Page lpage = (Page) BufferGetPage(lbuf); BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage); - if (!XLByteLE(lsn, PageGetLSN(lpage))) + if (lsn > PageGetLSN(lpage)) { OffsetNumber off; OffsetNumber maxoff = PageGetMaxOffsetNumber(lpage); @@ -459,7 +459,7 @@ btree_xlog_split(bool onleft, bool isroot, { Page page = (Page) BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page); @@ -537,7 +537,7 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record) LockBufferForCleanup(buffer); page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) + if (lsn <= PageGetLSN(page)) { UnlockReleaseBuffer(buffer); return; @@ -757,7 +757,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record) return; page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) + if (lsn <= PageGetLSN(page)) { UnlockReleaseBuffer(buffer); return; @@ -820,7 +820,7 @@ btree_xlog_delete_page(uint8 info, XLogRecPtr lsn, XLogRecord *record) { page = (Page) BufferGetPage(buffer); pageop = (BTPageOpaque) PageGetSpecialPointer(page); - if (XLByteLE(lsn, PageGetLSN(page))) + if (lsn <= PageGetLSN(page)) { UnlockReleaseBuffer(buffer); } @@ -867,7 +867,7 @@ btree_xlog_delete_page(uint8 info, XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) 
{ page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) + if (lsn <= PageGetLSN(page)) { UnlockReleaseBuffer(buffer); } @@ -895,7 +895,7 @@ btree_xlog_delete_page(uint8 info, XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = (Page) BufferGetPage(buffer); - if (XLByteLE(lsn, PageGetLSN(page))) + if (lsn <= PageGetLSN(page)) { UnlockReleaseBuffer(buffer); } diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index 2a874a2f16b..9a7aaf75e30 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -139,7 +139,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record) SpGistInitBuffer(buffer, SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { /* insert new tuple */ if (xldata->offnumLeaf != xldata->offnumHeadLeaf) @@ -187,7 +187,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { SpGistInnerTuple tuple; @@ -251,7 +251,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record) SpGistInitBuffer(buffer, SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0)); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { int i; @@ -280,7 +280,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves, state.isBuild ? 
SPGIST_PLACEHOLDER : SPGIST_REDIRECT, @@ -305,7 +305,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { SpGistInnerTuple tuple; @@ -353,7 +353,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { PageIndexTupleDelete(page, xldata->offnum); if (PageAddItem(page, (Item) innerTuple, innerTuple->size, @@ -399,7 +399,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record) if (xldata->newPage) SpGistInitBuffer(buffer, 0); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { addOrReplaceTuple(page, (Item) innerTuple, innerTuple->size, xldata->offnumNew); @@ -430,7 +430,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { SpGistDeadTuple dt; @@ -495,7 +495,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { SpGistInnerTuple innerTuple; @@ -552,7 +552,7 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record) if (xldata->newPage) SpGistInitBuffer(buffer, 0); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { addOrReplaceTuple(page, (Item) postfixTuple, postfixTuple->size, xldata->offnumPostfix); @@ -574,7 +574,7 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { PageIndexTupleDelete(page, xldata->offnumPrefix); if (PageAddItem(page, (Item) prefixTuple, prefixTuple->size, @@ -670,7 +670,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) if 
(BufferIsValid(srcBuffer)) { srcPage = BufferGetPage(srcBuffer); - if (!XLByteLE(lsn, PageGetLSN(srcPage))) + if (lsn > PageGetLSN(srcPage)) { /* * We have it a bit easier here than in doPickSplit(), @@ -737,7 +737,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(destBuffer)) { destPage = (Page) BufferGetPage(destBuffer); - if (XLByteLE(lsn, PageGetLSN(destPage))) + if (lsn <= PageGetLSN(destPage)) destPage = NULL; /* don't do any page updates */ } else @@ -790,7 +790,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) SpGistInitBuffer(buffer, (xldata->storesNulls ? SPGIST_NULLS : 0)); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { addOrReplaceTuple(page, (Item) innerTuple, innerTuple->size, xldata->offnumInner); @@ -842,7 +842,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { SpGistInnerTuple parent; @@ -900,7 +900,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { spgPageIndexMultiDelete(&state, page, toDead, xldata->nDead, @@ -971,7 +971,7 @@ spgRedoVacuumRoot(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { /* The tuple numbers are in order */ PageIndexMultiDelete(page, toDelete, xldata->nDelete); @@ -1017,7 +1017,7 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record) if (BufferIsValid(buffer)) { page = BufferGetPage(buffer); - if (!XLByteLE(lsn, PageGetLSN(page))) + if (lsn > PageGetLSN(page)) { SpGistPageOpaque opaque = SpGistPageGetOpaque(page); int i; diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index e3fd56dd2ba..2d274cf77ab 100644 --- a/src/backend/access/transam/clog.c +++ 
b/src/backend/access/transam/clog.c @@ -365,7 +365,7 @@ TransactionIdSetStatusBit(TransactionId xid, XidStatus status, XLogRecPtr lsn, i { int lsnindex = GetLSNIndex(slotno, xid); - if (XLByteLT(ClogCtl->shared->group_lsn[lsnindex], lsn)) + if (ClogCtl->shared->group_lsn[lsnindex] < lsn) ClogCtl->shared->group_lsn[lsnindex] = lsn; } } diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index b8f60d693f7..ec2509b165a 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -685,7 +685,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) { XLogRecPtr this_lsn = shared->group_lsn[lsnindex++]; - if (XLByteLT(max_lsn, this_lsn)) + if (max_lsn < this_lsn) max_lsn = this_lsn; } diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c index b33d230c701..432cc1463c3 100644 --- a/src/backend/access/transam/timeline.c +++ b/src/backend/access/transam/timeline.c @@ -522,8 +522,8 @@ tliOfPointInHistory(XLogRecPtr ptr, List *history) foreach(cell, history) { TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell); - if ((XLogRecPtrIsInvalid(tle->begin) || XLByteLE(tle->begin, ptr)) && - (XLogRecPtrIsInvalid(tle->end) || XLByteLT(ptr, tle->end))) + if ((XLogRecPtrIsInvalid(tle->begin) || tle->begin <= ptr) && + (XLogRecPtrIsInvalid(tle->end) || ptr < tle->end)) { /* found it */ return tle->tli; diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 3a0b190abcf..a7e90e473a1 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -1559,7 +1559,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno]; if (gxact->valid && - XLByteLE(gxact->prepare_lsn, redo_horizon)) + gxact->prepare_lsn <= redo_horizon) xids[nxids++] = pgxact->xid; } diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c 
index 0b1a9c21d5b..2998f60c5f9 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -926,9 +926,9 @@ begin:; * affect the contents of the XLOG record, so we'll update our local copy * but not force a recomputation. */ - if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr)) + if (RedoRecPtr != Insert->RedoRecPtr) { - Assert(XLByteLT(RedoRecPtr, Insert->RedoRecPtr)); + Assert(RedoRecPtr < Insert->RedoRecPtr); RedoRecPtr = Insert->RedoRecPtr; if (doPageWrites) @@ -938,7 +938,7 @@ begin:; if (dtbuf[i] == InvalidBuffer) continue; if (dtbuf_bkp[i] == false && - XLByteLE(dtbuf_lsn[i], RedoRecPtr)) + dtbuf_lsn[i] <= RedoRecPtr) { /* * Oops, this buffer now needs to be backed up, but we @@ -1002,7 +1002,7 @@ begin:; LWLockAcquire(WALWriteLock, LW_EXCLUSIVE); LogwrtResult = XLogCtl->LogwrtResult; - if (!XLByteLE(RecPtr, LogwrtResult.Flush)) + if (LogwrtResult.Flush < RecPtr) { XLogwrtRqst FlushRqst; @@ -1150,9 +1150,9 @@ begin:; SpinLockAcquire(&xlogctl->info_lck); xlogctl->LogwrtResult = LogwrtResult; - if (XLByteLT(xlogctl->LogwrtRqst.Write, LogwrtResult.Write)) + if (xlogctl->LogwrtRqst.Write < LogwrtResult.Write) xlogctl->LogwrtRqst.Write = LogwrtResult.Write; - if (XLByteLT(xlogctl->LogwrtRqst.Flush, LogwrtResult.Flush)) + if (xlogctl->LogwrtRqst.Flush < LogwrtResult.Flush) xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush; SpinLockRelease(&xlogctl->info_lck); } @@ -1188,7 +1188,7 @@ begin:; SpinLockAcquire(&xlogctl->info_lck); /* advance global request to include new block(s) */ - if (XLByteLT(xlogctl->LogwrtRqst.Write, WriteRqst)) + if (xlogctl->LogwrtRqst.Write < WriteRqst) xlogctl->LogwrtRqst.Write = WriteRqst; /* update local result copy while I have the chance */ LogwrtResult = xlogctl->LogwrtResult; @@ -1227,7 +1227,7 @@ XLogCheckBuffer(XLogRecData *rdata, bool doPageWrites, *lsn = PageGetLSN(page); if (doPageWrites && - XLByteLE(PageGetLSN(page), RedoRecPtr)) + PageGetLSN(page) <= RedoRecPtr) { /* * The page needs to be backed up, so 
set up *bkpb @@ -1300,7 +1300,7 @@ AdvanceXLInsertBuffer(bool new_segment) * written out. */ OldPageRqstPtr = XLogCtl->xlblocks[nextidx]; - if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write)) + if (LogwrtResult.Write < OldPageRqstPtr) { /* nope, got work to do... */ XLogRecPtr FinishedPageRqstPtr; @@ -1313,7 +1313,7 @@ AdvanceXLInsertBuffer(bool new_segment) volatile XLogCtlData *xlogctl = XLogCtl; SpinLockAcquire(&xlogctl->info_lck); - if (XLByteLT(xlogctl->LogwrtRqst.Write, FinishedPageRqstPtr)) + if (xlogctl->LogwrtRqst.Write < FinishedPageRqstPtr) xlogctl->LogwrtRqst.Write = FinishedPageRqstPtr; LogwrtResult = xlogctl->LogwrtResult; SpinLockRelease(&xlogctl->info_lck); @@ -1325,12 +1325,12 @@ AdvanceXLInsertBuffer(bool new_segment) * Now that we have an up-to-date LogwrtResult value, see if we still * need to write it or if someone else already did. */ - if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write)) + if (LogwrtResult.Write < OldPageRqstPtr) { /* Must acquire write lock */ LWLockAcquire(WALWriteLock, LW_EXCLUSIVE); LogwrtResult = XLogCtl->LogwrtResult; - if (XLByteLE(OldPageRqstPtr, LogwrtResult.Write)) + if (LogwrtResult.Write >= OldPageRqstPtr) { /* OK, someone wrote it already */ LWLockRelease(WALWriteLock); @@ -1361,12 +1361,11 @@ AdvanceXLInsertBuffer(bool new_segment) { /* force it to a segment start point */ if (NewPageBeginPtr % XLogSegSize != 0) - XLByteAdvance(NewPageBeginPtr, - XLogSegSize - NewPageBeginPtr % XLogSegSize); + NewPageBeginPtr += XLogSegSize - NewPageBeginPtr % XLogSegSize; } NewPageEndPtr = NewPageBeginPtr; - XLByteAdvance(NewPageEndPtr, XLOG_BLCKSZ); + NewPageEndPtr += XLOG_BLCKSZ; XLogCtl->xlblocks[nextidx] = NewPageEndPtr; NewPage = (XLogPageHeader) (XLogCtl->pages + nextidx * (Size) XLOG_BLCKSZ); @@ -1503,14 +1502,14 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch) */ curridx = Write->curridx; - while (XLByteLT(LogwrtResult.Write, WriteRqst.Write)) + while (LogwrtResult.Write < WriteRqst.Write) { /* * Make 
sure we're not ahead of the insert process. This could happen * if we're passed a bogus WriteRqst.Write that is past the end of the * last page that's been initialized by AdvanceXLInsertBuffer. */ - if (!XLByteLT(LogwrtResult.Write, XLogCtl->xlblocks[curridx])) + if (LogwrtResult.Write >= XLogCtl->xlblocks[curridx]) elog(PANIC, "xlog write request %X/%X is past end of log %X/%X", (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write, (uint32) (XLogCtl->xlblocks[curridx] >> 32), @@ -1518,7 +1517,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch) /* Advance LogwrtResult.Write to end of current buffer page */ LogwrtResult.Write = XLogCtl->xlblocks[curridx]; - ispartialpage = XLByteLT(WriteRqst.Write, LogwrtResult.Write); + ispartialpage = WriteRqst.Write < LogwrtResult.Write; if (!XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo)) { @@ -1560,7 +1559,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch) * contiguous in memory), or if we are at the end of the logfile * segment. 
*/ - last_iteration = !XLByteLT(LogwrtResult.Write, WriteRqst.Write); + last_iteration = WriteRqst.Write <= LogwrtResult.Write; finishing_seg = !ispartialpage && (startoffset + npages * XLOG_BLCKSZ) >= XLogSegSize; @@ -1671,8 +1670,9 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch) /* * If asked to flush, do so */ - if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) && - XLByteLT(LogwrtResult.Flush, LogwrtResult.Write)) + if (LogwrtResult.Flush < WriteRqst.Flush && + LogwrtResult.Flush < LogwrtResult.Write) + { /* * Could get here without iterating above loop, in which case we might @@ -1714,9 +1714,9 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch) SpinLockAcquire(&xlogctl->info_lck); xlogctl->LogwrtResult = LogwrtResult; - if (XLByteLT(xlogctl->LogwrtRqst.Write, LogwrtResult.Write)) + if (xlogctl->LogwrtRqst.Write < LogwrtResult.Write) xlogctl->LogwrtRqst.Write = LogwrtResult.Write; - if (XLByteLT(xlogctl->LogwrtRqst.Flush, LogwrtResult.Flush)) + if (xlogctl->LogwrtRqst.Flush < LogwrtResult.Flush) xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush; SpinLockRelease(&xlogctl->info_lck); } @@ -1739,7 +1739,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN) SpinLockAcquire(&xlogctl->info_lck); LogwrtResult = xlogctl->LogwrtResult; sleeping = xlogctl->WalWriterSleeping; - if (XLByteLT(xlogctl->asyncXactLSN, asyncXactLSN)) + if (xlogctl->asyncXactLSN < asyncXactLSN) xlogctl->asyncXactLSN = asyncXactLSN; SpinLockRelease(&xlogctl->info_lck); @@ -1754,7 +1754,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN) WriteRqstPtr -= WriteRqstPtr % XLOG_BLCKSZ; /* if we have already flushed that far, we're done */ - if (XLByteLE(WriteRqstPtr, LogwrtResult.Flush)) + if (WriteRqstPtr <= LogwrtResult.Flush) return; } @@ -1780,7 +1780,7 @@ static void UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force) { /* Quick check using our local copy of the variable */ - if (!updateMinRecoveryPoint || (!force && XLByteLE(lsn, minRecoveryPoint))) + if 
(!updateMinRecoveryPoint || (!force && lsn <= minRecoveryPoint)) return; LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); @@ -1796,7 +1796,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force) */ if (minRecoveryPoint == 0) updateMinRecoveryPoint = false; - else if (force || XLByteLT(minRecoveryPoint, lsn)) + else if (force || minRecoveryPoint < lsn) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; @@ -1821,7 +1821,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force) newMinRecoveryPointTLI = xlogctl->replayEndTLI; SpinLockRelease(&xlogctl->info_lck); - if (!force && XLByteLT(newMinRecoveryPoint, lsn)) + if (!force && newMinRecoveryPoint < lsn) elog(WARNING, "xlog min recovery request %X/%X is past current point %X/%X", (uint32) (lsn >> 32) , (uint32) lsn, @@ -1829,7 +1829,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force) (uint32) newMinRecoveryPoint); /* update control file */ - if (XLByteLT(ControlFile->minRecoveryPoint, newMinRecoveryPoint)) + if (ControlFile->minRecoveryPoint < newMinRecoveryPoint) { ControlFile->minRecoveryPoint = newMinRecoveryPoint; ControlFile->minRecoveryPointTLI = newMinRecoveryPointTLI; @@ -1873,7 +1873,7 @@ XLogFlush(XLogRecPtr record) } /* Quick exit if already known flushed */ - if (XLByteLE(record, LogwrtResult.Flush)) + if (record <= LogwrtResult.Flush) return; #ifdef WAL_DEBUG @@ -1908,13 +1908,13 @@ XLogFlush(XLogRecPtr record) /* read LogwrtResult and update local state */ SpinLockAcquire(&xlogctl->info_lck); - if (XLByteLT(WriteRqstPtr, xlogctl->LogwrtRqst.Write)) + if (WriteRqstPtr < xlogctl->LogwrtRqst.Write) WriteRqstPtr = xlogctl->LogwrtRqst.Write; LogwrtResult = xlogctl->LogwrtResult; SpinLockRelease(&xlogctl->info_lck); /* done already? 
*/ - if (XLByteLE(record, LogwrtResult.Flush)) + if (record <= LogwrtResult.Flush) break; /* @@ -1936,7 +1936,7 @@ XLogFlush(XLogRecPtr record) /* Got the lock; recheck whether request is satisfied */ LogwrtResult = XLogCtl->LogwrtResult; - if (XLByteLE(record, LogwrtResult.Flush)) + if (record <= LogwrtResult.Flush) { LWLockRelease(WALWriteLock); break; @@ -2010,7 +2010,7 @@ XLogFlush(XLogRecPtr record) * calls from bufmgr.c are not within critical sections and so we will not * force a restart for a bad LSN on a data page. */ - if (XLByteLT(LogwrtResult.Flush, record)) + if (LogwrtResult.Flush < record) elog(ERROR, "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X", (uint32) (record >> 32), (uint32) record, @@ -2060,7 +2060,7 @@ XLogBackgroundFlush(void) WriteRqstPtr -= WriteRqstPtr % XLOG_BLCKSZ; /* if we have already flushed that far, consider async commit records */ - if (XLByteLE(WriteRqstPtr, LogwrtResult.Flush)) + if (WriteRqstPtr <= LogwrtResult.Flush) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; @@ -2076,7 +2076,7 @@ XLogBackgroundFlush(void) * holding an open file handle to a logfile that's no longer in use, * preventing the file from being deleted. 
*/ - if (XLByteLE(WriteRqstPtr, LogwrtResult.Flush)) + if (WriteRqstPtr <= LogwrtResult.Flush) { if (openLogFile >= 0) { @@ -2101,7 +2101,7 @@ XLogBackgroundFlush(void) /* now wait for the write lock */ LWLockAcquire(WALWriteLock, LW_EXCLUSIVE); LogwrtResult = XLogCtl->LogwrtResult; - if (!XLByteLE(WriteRqstPtr, LogwrtResult.Flush)) + if (WriteRqstPtr > LogwrtResult.Flush) { XLogwrtRqst WriteRqst; @@ -2137,7 +2137,7 @@ XLogNeedsFlush(XLogRecPtr record) if (RecoveryInProgress()) { /* Quick exit if already known updated */ - if (XLByteLE(record, minRecoveryPoint) || !updateMinRecoveryPoint) + if (record <= minRecoveryPoint || !updateMinRecoveryPoint) return false; /* @@ -2160,14 +2160,14 @@ XLogNeedsFlush(XLogRecPtr record) updateMinRecoveryPoint = false; /* check again */ - if (XLByteLE(record, minRecoveryPoint) || !updateMinRecoveryPoint) + if (record <= minRecoveryPoint || !updateMinRecoveryPoint) return false; else return true; } /* Quick exit if already known flushed */ - if (XLByteLE(record, LogwrtResult.Flush)) + if (record <= LogwrtResult.Flush) return false; /* read LogwrtResult and update local state */ @@ -2181,7 +2181,7 @@ XLogNeedsFlush(XLogRecPtr record) } /* check again */ - if (XLByteLE(record, LogwrtResult.Flush)) + if (record <= LogwrtResult.Flush) return false; return true; @@ -3489,7 +3489,7 @@ retry: do { /* Calculate pointer to beginning of next page */ - XLByteAdvance(pagelsn, XLOG_BLCKSZ); + pagelsn += XLOG_BLCKSZ; /* Wait for the next page to become available */ if (!XLogPageRead(&pagelsn, emode, false, false)) return NULL; @@ -3674,7 +3674,7 @@ ValidXLogPageHeader(XLogPageHeader hdr, int emode, bool segmentonly) return false; } - if (!XLByteEQ(hdr->xlp_pageaddr, recaddr)) + if (hdr->xlp_pageaddr != recaddr) { ereport(emode_for_corrupt_record(emode, recaddr), (errmsg("unexpected pageaddr %X/%X in log segment %s, offset %u", @@ -3785,7 +3785,7 @@ ValidXLogRecordHeader(XLogRecPtr *RecPtr, XLogRecord *record, int emode, * We can't exactly verify 
the prev-link, but surely it should be less * than the record's own address. */ - if (!XLByteLT(record->xl_prev, *RecPtr)) + if (!(record->xl_prev < *RecPtr)) { ereport(emode_for_corrupt_record(emode, *RecPtr), (errmsg("record with incorrect prev-link %X/%X at %X/%X", @@ -3801,7 +3801,7 @@ ValidXLogRecordHeader(XLogRecPtr *RecPtr, XLogRecord *record, int emode, * check guards against torn WAL pages where a stale but valid-looking * WAL record starts on a sector boundary. */ - if (!XLByteEQ(record->xl_prev, ReadRecPtr)) + if (record->xl_prev != ReadRecPtr) { ereport(emode_for_corrupt_record(emode, *RecPtr), (errmsg("record with incorrect prev-link %X/%X at %X/%X", @@ -3873,7 +3873,7 @@ rescanLatestTimeLine(void) * next timeline was forked off from it *after* the current recovery * location. */ - if (XLByteLT(currentTle->end, EndRecPtr)) + if (currentTle->end < EndRecPtr) { ereport(LOG, (errmsg("new timeline %u forked off current database system timeline %u before current recovery point %X/%X", @@ -5438,7 +5438,7 @@ StartupXLOG(void) * backup_label around that references a WAL segment that's * already been archived. */ - if (XLByteLT(checkPoint.redo, checkPointLoc)) + if (checkPoint.redo < checkPointLoc) { if (!ReadRecord(&(checkPoint.redo), LOG, false)) ereport(FATAL, @@ -5539,7 +5539,7 @@ StartupXLOG(void) RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo; - if (XLByteLT(RecPtr, checkPoint.redo)) + if (RecPtr < checkPoint.redo) ereport(PANIC, (errmsg("invalid redo in checkpoint record"))); @@ -5548,7 +5548,7 @@ StartupXLOG(void) * have been a clean shutdown and we did not have a recovery.conf file, * then assume no recovery needed. 
*/ - if (XLByteLT(checkPoint.redo, RecPtr)) + if (checkPoint.redo < RecPtr) { if (wasShutdown) ereport(PANIC, @@ -5593,7 +5593,7 @@ StartupXLOG(void) if (InArchiveRecovery) { /* initialize minRecoveryPoint if not set yet */ - if (XLByteLT(ControlFile->minRecoveryPoint, checkPoint.redo)) + if (ControlFile->minRecoveryPoint < checkPoint.redo) { ControlFile->minRecoveryPoint = checkPoint.redo; ControlFile->minRecoveryPointTLI = checkPoint.ThisTimeLineID; @@ -5797,7 +5797,7 @@ StartupXLOG(void) * Find the first record that logically follows the checkpoint --- it * might physically precede it, though. */ - if (XLByteLT(checkPoint.redo, RecPtr)) + if (checkPoint.redo < RecPtr) { /* back up to find the record */ record = ReadRecord(&(checkPoint.redo), PANIC, false); @@ -6048,7 +6048,7 @@ StartupXLOG(void) * advanced beyond the WAL we processed. */ if (InRecovery && - (XLByteLT(EndOfLog, minRecoveryPoint) || + (EndOfLog < minRecoveryPoint || !XLogRecPtrIsInvalid(ControlFile->backupStartPoint))) { if (reachedStopPoint) @@ -6377,7 +6377,7 @@ CheckRecoveryConsistency(void) * Have we reached the point where our base backup was completed? */ if (!XLogRecPtrIsInvalid(ControlFile->backupEndPoint) && - XLByteLE(ControlFile->backupEndPoint, EndRecPtr)) + ControlFile->backupEndPoint <= EndRecPtr) { /* * We have reached the end of base backup, as indicated by pg_control. @@ -6390,7 +6390,7 @@ CheckRecoveryConsistency(void) LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); - if (XLByteLT(ControlFile->minRecoveryPoint, EndRecPtr)) + if (ControlFile->minRecoveryPoint < EndRecPtr) ControlFile->minRecoveryPoint = EndRecPtr; ControlFile->backupStartPoint = InvalidXLogRecPtr; @@ -6409,7 +6409,7 @@ CheckRecoveryConsistency(void) * consistent yet. 
*/ if (!reachedConsistency && !ControlFile->backupEndRequired && - XLByteLE(minRecoveryPoint, XLogCtl->lastReplayedEndRecPtr) && + minRecoveryPoint <= XLogCtl->lastReplayedEndRecPtr && XLogRecPtrIsInvalid(ControlFile->backupStartPoint)) { /* @@ -6717,7 +6717,7 @@ GetRedoRecPtr(void) volatile XLogCtlData *xlogctl = XLogCtl; SpinLockAcquire(&xlogctl->info_lck); - Assert(XLByteLE(RedoRecPtr, xlogctl->Insert.RedoRecPtr)); + Assert(RedoRecPtr <= xlogctl->Insert.RedoRecPtr); RedoRecPtr = xlogctl->Insert.RedoRecPtr; SpinLockRelease(&xlogctl->info_lck); @@ -7309,7 +7309,7 @@ CreateCheckPoint(int flags) * We now have ProcLastRecPtr = start of actual checkpoint record, recptr * = end of actual checkpoint record. */ - if (shutdown && !XLByteEQ(checkPoint.redo, ProcLastRecPtr)) + if (shutdown && checkPoint.redo != ProcLastRecPtr) ereport(PANIC, (errmsg("concurrent transaction log activity while database system is shutting down"))); @@ -7542,7 +7542,7 @@ CreateRestartPoint(int flags) * side-effect. */ if (XLogRecPtrIsInvalid(lastCheckPointRecPtr) || - XLByteLE(lastCheckPoint.redo, ControlFile->checkPointCopy.redo)) + lastCheckPoint.redo <= ControlFile->checkPointCopy.redo) { ereport(DEBUG2, (errmsg("skipping restartpoint, already performed at %X/%X", @@ -7605,7 +7605,7 @@ CreateRestartPoint(int flags) */ LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY && - XLByteLT(ControlFile->checkPointCopy.redo, lastCheckPoint.redo)) + ControlFile->checkPointCopy.redo < lastCheckPoint.redo) { ControlFile->prevCheckPoint = ControlFile->checkPoint; ControlFile->checkPoint = lastCheckPointRecPtr; @@ -7944,7 +7944,7 @@ checkTimeLineSwitch(XLogRecPtr lsn, TimeLineID newTLI) * new timeline. 
*/ if (!XLogRecPtrIsInvalid(minRecoveryPoint) && - XLByteLT(lsn, minRecoveryPoint) && + lsn < minRecoveryPoint && newTLI > minRecoveryPointTLI) ereport(PANIC, (errmsg("unexpected timeline ID %u in checkpoint record, before reaching minimum recovery point %X/%X on timeline %u", @@ -8143,7 +8143,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) memcpy(&startpoint, XLogRecGetData(record), sizeof(startpoint)); - if (XLByteEQ(ControlFile->backupStartPoint, startpoint)) + if (ControlFile->backupStartPoint == startpoint) { /* * We have reached the end of base backup, the point where @@ -8156,7 +8156,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); - if (XLByteLT(ControlFile->minRecoveryPoint, lsn)) + if (ControlFile->minRecoveryPoint < lsn) { ControlFile->minRecoveryPoint = lsn; ControlFile->minRecoveryPointTLI = ThisTimeLineID; @@ -8191,7 +8191,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) */ minRecoveryPoint = ControlFile->minRecoveryPoint; minRecoveryPointTLI = ControlFile->minRecoveryPointTLI; - if (minRecoveryPoint != 0 && XLByteLT(minRecoveryPoint, lsn)) + if (minRecoveryPoint != 0 && minRecoveryPoint < lsn) { ControlFile->minRecoveryPoint = lsn; ControlFile->minRecoveryPointTLI = ThisTimeLineID; @@ -8219,7 +8219,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) if (!fpw) { SpinLockAcquire(&xlogctl->info_lck); - if (XLByteLT(xlogctl->lastFpwDisableRecPtr, ReadRecPtr)) + if (xlogctl->lastFpwDisableRecPtr < ReadRecPtr) xlogctl->lastFpwDisableRecPtr = ReadRecPtr; SpinLockRelease(&xlogctl->info_lck); } @@ -8584,7 +8584,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) recptr = xlogctl->lastFpwDisableRecPtr; SpinLockRelease(&xlogctl->info_lck); - if (!checkpointfpw || XLByteLE(startpoint, recptr)) + if (!checkpointfpw || startpoint <= recptr) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("WAL generated with full_page_writes=off was replayed " @@ -8616,7 +8616,7 
@@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) * either because only few buffers have been dirtied yet. */ LWLockAcquire(WALInsertLock, LW_SHARED); - if (XLByteLT(XLogCtl->Insert.lastBackupStart, startpoint)) + if (XLogCtl->Insert.lastBackupStart < startpoint) { XLogCtl->Insert.lastBackupStart = startpoint; gotUniqueStartpoint = true; @@ -8933,7 +8933,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) recptr = xlogctl->lastFpwDisableRecPtr; SpinLockRelease(&xlogctl->info_lck); - if (XLByteLE(startpoint, recptr)) + if (startpoint <= recptr) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("WAL generated with full_page_writes=off was replayed " @@ -9402,7 +9402,7 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt, retry: /* See if we need to retrieve more data */ if (readFile < 0 || - (readSource == XLOG_FROM_STREAM && !XLByteLT(*RecPtr, receivedUpto))) + (readSource == XLOG_FROM_STREAM && receivedUpto <= *RecPtr)) { if (StandbyMode) { @@ -9772,17 +9772,17 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * When we are behind, XLogReceiptTime will not advance, so the * grace time allotted to conflicting queries will decrease. 
*/ - if (XLByteLT(RecPtr, receivedUpto)) + if (RecPtr < receivedUpto) havedata = true; else { XLogRecPtr latestChunkStart; receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI); - if (XLByteLT(RecPtr, receivedUpto) && receiveTLI == curFileTLI) + if (RecPtr < receivedUpto && receiveTLI == curFileTLI) { havedata = true; - if (!XLByteLT(RecPtr, latestChunkStart)) + if (latestChunkStart <= RecPtr) { XLogReceiptTime = GetCurrentTimestamp(); SetCurrentChunkStartTime(XLogReceiptTime); @@ -9884,7 +9884,7 @@ emode_for_corrupt_record(int emode, XLogRecPtr RecPtr) if (readSource == XLOG_FROM_PG_XLOG && emode == LOG) { - if (XLByteEQ(RecPtr, lastComplaint)) + if (RecPtr == lastComplaint) emode = DEBUG1; else lastComplaint = RecPtr; diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 585b5b2bed6..b51d8657305 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -607,7 +607,7 @@ nextval_internal(Oid relid) { XLogRecPtr redoptr = GetRedoRecPtr(); - if (XLByteLE(PageGetLSN(page), redoptr)) + if (PageGetLSN(page) <= redoptr) { /* last update of seq was before checkpoint */ fetch = log = fetch + SEQ_LOG_VALS; diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index a61725eed69..b2908a79fde 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -120,7 +120,7 @@ SyncRepWaitForLSN(XLogRecPtr XactCommitLSN) * be a low cost check. */ if (!WalSndCtl->sync_standbys_defined || - XLByteLE(XactCommitLSN, WalSndCtl->lsn[mode])) + XactCommitLSN <= WalSndCtl->lsn[mode]) { LWLockRelease(SyncRepLock); return; @@ -287,7 +287,7 @@ SyncRepQueueInsert(int mode) * Stop at the queue element that we should after to ensure the queue * is ordered by LSN. 
*/ - if (XLByteLT(proc->waitLSN, MyProc->waitLSN)) + if (proc->waitLSN < MyProc->waitLSN) break; proc = (PGPROC *) SHMQueuePrev(&(WalSndCtl->SyncRepQueue[mode]), @@ -428,12 +428,12 @@ SyncRepReleaseWaiters(void) * Set the lsn first so that when we wake backends they will release up to * this location. */ - if (XLByteLT(walsndctl->lsn[SYNC_REP_WAIT_WRITE], MyWalSnd->write)) + if (walsndctl->lsn[SYNC_REP_WAIT_WRITE] < MyWalSnd->write) { walsndctl->lsn[SYNC_REP_WAIT_WRITE] = MyWalSnd->write; numwrite = SyncRepWakeQueue(false, SYNC_REP_WAIT_WRITE); } - if (XLByteLT(walsndctl->lsn[SYNC_REP_WAIT_FLUSH], MyWalSnd->flush)) + if (walsndctl->lsn[SYNC_REP_WAIT_FLUSH] < MyWalSnd->flush) { walsndctl->lsn[SYNC_REP_WAIT_FLUSH] = MyWalSnd->flush; numflush = SyncRepWakeQueue(false, SYNC_REP_WAIT_FLUSH); @@ -543,7 +543,7 @@ SyncRepWakeQueue(bool all, int mode) /* * Assume the queue is ordered by LSN */ - if (!all && XLByteLT(walsndctl->lsn[mode], proc->waitLSN)) + if (!all && walsndctl->lsn[mode] < proc->waitLSN) return numprocs; /* @@ -640,7 +640,7 @@ SyncRepQueueIsOrderedByLSN(int mode) * Check the queue is ordered by LSN and that multiple procs don't * have matching LSNs */ - if (XLByteLE(proc->waitLSN, lastLSN)) + if (proc->waitLSN <= lastLSN) return false; lastLSN = proc->waitLSN; diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index a0960f2ceab..bdb00f6cbe3 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -914,7 +914,7 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr) } /* Update state for write */ - XLByteAdvance(recptr, byteswritten); + recptr += byteswritten; recvOff += byteswritten; nbytes -= byteswritten; @@ -933,7 +933,7 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr) static void XLogWalRcvFlush(bool dying) { - if (XLByteLT(LogstreamResult.Flush, LogstreamResult.Write)) + if (LogstreamResult.Flush < LogstreamResult.Write) { /* use volatile pointer to 
prevent code rearrangement */ volatile WalRcvData *walrcv = WalRcv; @@ -944,7 +944,7 @@ XLogWalRcvFlush(bool dying) /* Update shared-memory status */ SpinLockAcquire(&walrcv->mutex); - if (XLByteLT(walrcv->receivedUpto, LogstreamResult.Flush)) + if (walrcv->receivedUpto < LogstreamResult.Flush) { walrcv->latestChunkStart = walrcv->receivedUpto; walrcv->receivedUpto = LogstreamResult.Flush; @@ -1016,8 +1016,8 @@ XLogWalRcvSendReply(bool force, bool requestReply) * probably OK. */ if (!force - && XLByteEQ(writePtr, LogstreamResult.Write) - && XLByteEQ(flushPtr, LogstreamResult.Flush) + && writePtr == LogstreamResult.Write + && flushPtr == LogstreamResult.Flush && !TimestampDifferenceExceeds(sendTime, now, wal_receiver_status_interval * 1000)) return; @@ -1126,7 +1126,7 @@ ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime) /* Update shared-memory status */ SpinLockAcquire(&walrcv->mutex); - if (XLByteLT(walrcv->latestWalEnd, walEnd)) + if (walrcv->latestWalEnd < walEnd) walrcv->latestWalEndTime = sendTime; walrcv->latestWalEnd = walEnd; walrcv->lastMsgSendTime = sendTime; diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c index 1aaafbb49fc..e87d59ea1d8 100644 --- a/src/backend/replication/walreceiverfuncs.c +++ b/src/backend/replication/walreceiverfuncs.c @@ -326,7 +326,7 @@ GetReplicationApplyDelay(void) replayPtr = GetXLogReplayRecPtr(NULL); - if (XLByteEQ(receivePtr, replayPtr)) + if (receivePtr == replayPtr) return 0; TimestampDifference(GetCurrentChunkReplayStartTime(), diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index df6ad6df7bb..c0d30241891 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -471,7 +471,7 @@ StartReplication(StartReplicationCmd *cmd) * WAL segment. 
*/ if (!XLogRecPtrIsInvalid(switchpoint) && - XLByteLT(switchpoint, cmd->startpoint)) + switchpoint < cmd->startpoint) { ereport(ERROR, (errmsg("requested starting point %X/%X on timeline %u is not in this server's history", @@ -497,7 +497,7 @@ StartReplication(StartReplicationCmd *cmd) /* If there is nothing to stream, don't even enter COPY mode */ if (!sendTimeLineIsHistoric || - XLByteLT(cmd->startpoint, sendTimeLineValidUpto)) + cmd->startpoint < sendTimeLineValidUpto) { /* * When we first start replication the standby will be behind the primary. @@ -520,7 +520,7 @@ StartReplication(StartReplicationCmd *cmd) * Don't allow a request to stream from a future point in WAL that * hasn't been flushed to disk in this server yet. */ - if (XLByteLT(FlushPtr, cmd->startpoint)) + if (FlushPtr < cmd->startpoint) { ereport(ERROR, (errmsg("requested starting point %X/%X is ahead of the WAL flush position of this server %X/%X", @@ -1249,7 +1249,7 @@ retry: } /* Update state for read */ - XLByteAdvance(recptr, readbytes); + recptr += readbytes; sendOff += readbytes; nbytes -= readbytes; @@ -1384,11 +1384,11 @@ XLogSend(bool *caughtup) history = readTimeLineHistory(ThisTimeLineID); sendTimeLineValidUpto = tliSwitchPoint(sendTimeLine, history); - Assert(XLByteLE(sentPtr, sendTimeLineValidUpto)); + Assert(sentPtr <= sendTimeLineValidUpto); list_free_deep(history); - /* the switchpoint should be >= current send pointer */ - if (!XLByteLE(sentPtr, sendTimeLineValidUpto)) + /* the current send pointer should be <= the switchpoint */ + if (!(sentPtr <= sendTimeLineValidUpto)) elog(ERROR, "server switched off timeline %u at %X/%X, but walsender already streamed up to %X/%X", sendTimeLine, (uint32) (sendTimeLineValidUpto >> 32), @@ -1420,7 +1420,7 @@ XLogSend(bool *caughtup) * If this is a historic timeline and we've reached the point where we * forked to the next timeline, stop streaming. 
*/ - if (sendTimeLineIsHistoric && XLByteLE(sendTimeLineValidUpto, sentPtr)) + if (sendTimeLineIsHistoric && sendTimeLineValidUpto <= sentPtr) { /* close the current file. */ if (sendFile >= 0) @@ -1436,8 +1436,8 @@ XLogSend(bool *caughtup) } /* Do we have any work to do? */ - Assert(XLByteLE(sentPtr, SendRqstPtr)); - if (XLByteLE(SendRqstPtr, sentPtr)) + Assert(sentPtr <= SendRqstPtr); + if (SendRqstPtr <= sentPtr) { *caughtup = true; return; @@ -1456,10 +1456,10 @@ XLogSend(bool *caughtup) */ startptr = sentPtr; endptr = startptr; - XLByteAdvance(endptr, MAX_SEND_SIZE); + endptr += MAX_SEND_SIZE; /* if we went beyond SendRqstPtr, back off */ - if (XLByteLE(SendRqstPtr, endptr)) + if (SendRqstPtr <= endptr) { endptr = SendRqstPtr; if (sendTimeLineIsHistoric) @@ -1968,7 +1968,7 @@ GetOldestWALSendPointer(void) if (recptr.xlogid == 0 && recptr.xrecoff == 0) continue; - if (!found || XLByteLT(recptr, oldest)) + if (!found || recptr < oldest) oldest = recptr; found = true; } diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c index 8502d560678..5a1c5981e46 100644 --- a/src/bin/pg_basebackup/receivelog.c +++ b/src/bin/pg_basebackup/receivelog.c @@ -636,7 +636,7 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline, /* Write was successful, advance our position */ bytes_written += bytes_to_write; bytes_left -= bytes_to_write; - XLByteAdvance(blockpos, bytes_to_write); + blockpos += bytes_to_write; xlogoff += bytes_to_write; /* Did we reach the end of a WAL segment? 
*/ diff --git a/src/include/access/xlogdefs.h b/src/include/access/xlogdefs.h index 153d0de22a9..f3acb2f56ee 100644 --- a/src/include/access/xlogdefs.h +++ b/src/include/access/xlogdefs.h @@ -29,20 +29,6 @@ typedef uint64 XLogRecPtr; #define XLogRecPtrIsInvalid(r) ((r) == InvalidXLogRecPtr) /* - * Macros for comparing XLogRecPtrs - */ -#define XLByteLT(a, b) ((a) < (b)) -#define XLByteLE(a, b) ((a) <= (b)) -#define XLByteEQ(a, b) ((a) == (b)) - - -/* - * Macro for advancing a record pointer by the specified number of bytes. - */ -#define XLByteAdvance(recptr, nbytes) \ - (recptr) += nbytes \ - -/* * XLogSegNo - physical log file sequence number. */ typedef uint64 XLogSegNo; |