author    | Robert Haas | 2022-09-28 13:45:27 +0000
committer | Robert Haas | 2022-09-28 13:55:28 +0000
commit    | a448e49bcbe40fb72e1ed85af910dd216d45bad8 (patch)
tree      | 2815aed4f5e89bdea91cdd35ec89facaa846e438 /src/backend/access
parent    | 6af082723277eeca74f2da65e7759666bf7c7f9c (diff)
Revert 56-bit relfilenode change and follow-up commits.
There are still some alignment-related failures in the buildfarm,
which may or may not be fixable quickly, but I've also
just realized that it increased the size of many WAL records by 4 bytes
because a block reference contains a RelFileLocator. The effect of that
hasn't been studied or discussed, so revert for now.
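
To make the size argument concrete: a RelFileLocator carries a tablespace OID, a database OID, and a relfilenumber, and the 56-bit patch widened the relfilenumber field to a 64-bit integer. The sketch below is illustrative only (it is not PostgreSQL source; the struct names are stand-ins) and simply shows the 12-byte versus 16-byte layouts that account for the extra 4 bytes in every WAL block reference that embeds the locator.

/*
 * Hypothetical size comparison, not PostgreSQL source: it only illustrates
 * why widening relNumber from 32 to 64 bits grows each block reference.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Oid;

typedef struct RelFileLocator32     /* shape of the layout being reverted to */
{
    Oid      spcOid;                /* tablespace */
    Oid      dbOid;                 /* database */
    uint32_t relNumber;             /* 32-bit relfilenumber */
} RelFileLocator32;

typedef struct RelFileLocator64     /* shape used by the reverted 56-bit patch */
{
    Oid      spcOid;
    Oid      dbOid;
    uint64_t relNumber;             /* 64-bit field holding a 56-bit value */
} RelFileLocator64;

int
main(void)
{
    /* 12 vs. 16 bytes on typical platforms: 4 extra bytes per block reference */
    printf("32-bit relNumber: %zu bytes\n", sizeof(RelFileLocator32));
    printf("64-bit relNumber: %zu bytes\n", sizeof(RelFileLocator64));
    return 0;
}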
Diffstat (limited to 'src/backend/access')
-rw-r--r-- | src/backend/access/gin/ginxlog.c | 2
-rw-r--r-- | src/backend/access/rmgrdesc/gistdesc.c | 2
-rw-r--r-- | src/backend/access/rmgrdesc/heapdesc.c | 2
-rw-r--r-- | src/backend/access/rmgrdesc/nbtdesc.c | 2
-rw-r--r-- | src/backend/access/rmgrdesc/seqdesc.c | 2
-rw-r--r-- | src/backend/access/rmgrdesc/xlogdesc.c | 21
-rw-r--r-- | src/backend/access/transam/README | 5
-rw-r--r-- | src/backend/access/transam/varsup.c | 209
-rw-r--r-- | src/backend/access/transam/xlog.c | 60
-rw-r--r-- | src/backend/access/transam/xlogprefetcher.c | 14
-rw-r--r-- | src/backend/access/transam/xlogrecovery.c | 6
-rw-r--r-- | src/backend/access/transam/xlogutils.c | 6
12 files changed, 27 insertions, 304 deletions
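
Most of the hunks below simply narrow the format used to print the relfilenumber back from a 64-bit specifier to a plain %u. As a rough, hedged illustration (not taken from this commit; PostgreSQL's UINT64_FORMAT macro is approximated here by the standard PRIu64, and the variable names are stand-ins), assuming relNumber shrinks from a 64-bit integer back to a 32-bit, Oid-sized value:

/* Illustrative only: shows why the format strings in the hunks below change. */
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
    uint32_t spcOid = 1663, dbOid = 5;

    /* With the 56-bit patch, relNumber was 64 bits wide, so the message had
     * to be assembled as "rel %u/%u/" UINT64_FORMAT (PRIu64 stands in here). */
    uint64_t wideRelNumber = 123456;
    printf("rel %u/%u/%" PRIu64 "\n", spcOid, dbOid, wideRelNumber);

    /* After the revert, relNumber is again a 32-bit value, so a plain %u
     * works and the whole string can be written out literally. */
    uint32_t relNumber = 123456;
    printf("rel %u/%u/%u\n", spcOid, dbOid, relNumber);
    return 0;
}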
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index bc093f2a887..41b92115bff 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -100,7 +100,7 @@ ginRedoInsertEntry(Buffer buffer, bool isLeaf, BlockNumber rightblkno, void *rda
 		BlockNumber blknum;
 
 		BufferGetTag(buffer, &locator, &forknum, &blknum);
-		elog(ERROR, "failed to add item to index page in %u/%u/" UINT64_FORMAT,
+		elog(ERROR, "failed to add item to index page in %u/%u/%u",
 			 locator.spcOid, locator.dbOid, locator.relNumber);
 	}
 }
diff --git a/src/backend/access/rmgrdesc/gistdesc.c b/src/backend/access/rmgrdesc/gistdesc.c
index d1c8a24d66f..7dd3c1d500f 100644
--- a/src/backend/access/rmgrdesc/gistdesc.c
+++ b/src/backend/access/rmgrdesc/gistdesc.c
@@ -26,7 +26,7 @@ out_gistxlogPageUpdate(StringInfo buf, gistxlogPageUpdate *xlrec)
 static void
 out_gistxlogPageReuse(StringInfo buf, gistxlogPageReuse *xlrec)
 {
-	appendStringInfo(buf, "rel %u/%u/" UINT64_FORMAT "; blk %u; latestRemovedXid %u:%u",
+	appendStringInfo(buf, "rel %u/%u/%u; blk %u; latestRemovedXid %u:%u",
 					 xlrec->locator.spcOid, xlrec->locator.dbOid,
 					 xlrec->locator.relNumber, xlrec->block,
 					 EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c
index 70bd49303a9..923d3bc43df 100644
--- a/src/backend/access/rmgrdesc/heapdesc.c
+++ b/src/backend/access/rmgrdesc/heapdesc.c
@@ -169,7 +169,7 @@ heap2_desc(StringInfo buf, XLogReaderState *record)
 	{
 		xl_heap_new_cid *xlrec = (xl_heap_new_cid *) rec;
 
-		appendStringInfo(buf, "rel %u/%u/" UINT64_FORMAT "; tid %u/%u",
+		appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
 						 xlrec->target_locator.spcOid,
 						 xlrec->target_locator.dbOid,
 						 xlrec->target_locator.relNumber,
diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c
index 6192a7ba841..4843cd530df 100644
--- a/src/backend/access/rmgrdesc/nbtdesc.c
+++ b/src/backend/access/rmgrdesc/nbtdesc.c
@@ -100,7 +100,7 @@ btree_desc(StringInfo buf, XLogReaderState *record)
 	{
 		xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) rec;
 
-		appendStringInfo(buf, "rel %u/%u/" UINT64_FORMAT "; latestRemovedXid %u:%u",
+		appendStringInfo(buf, "rel %u/%u/%u; latestRemovedXid %u:%u",
 						 xlrec->locator.spcOid, xlrec->locator.dbOid,
 						 xlrec->locator.relNumber,
 						 EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
diff --git a/src/backend/access/rmgrdesc/seqdesc.c b/src/backend/access/rmgrdesc/seqdesc.c
index df72caf1768..b3845f93bff 100644
--- a/src/backend/access/rmgrdesc/seqdesc.c
+++ b/src/backend/access/rmgrdesc/seqdesc.c
@@ -25,7 +25,7 @@ seq_desc(StringInfo buf, XLogReaderState *record)
 	xl_seq_rec *xlrec = (xl_seq_rec *) rec;
 
 	if (info == XLOG_SEQ_LOG)
-		appendStringInfo(buf, "rel %u/%u/" UINT64_FORMAT,
+		appendStringInfo(buf, "rel %u/%u/%u",
 						 xlrec->locator.spcOid, xlrec->locator.dbOid,
 						 xlrec->locator.relNumber);
 }
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index b1cede74cf4..3fd7185f217 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -45,8 +45,8 @@ xlog_desc(StringInfo buf, XLogReaderState *record)
 		CheckPoint *checkpoint = (CheckPoint *) rec;
 
 		appendStringInfo(buf, "redo %X/%X; "
-						 "tli %u; prev tli %u; fpw %s; xid %u:%u; relfilenumber " UINT64_FORMAT "; oid %u; "
-						 "multi %u; offset %u; oldest xid %u in DB %u; oldest multi %u in DB %u; "
+						 "tli %u; prev tli %u; fpw %s; xid %u:%u; oid %u; multi %u; offset %u; "
+						 "oldest xid %u in DB %u; oldest multi %u in DB %u; "
 						 "oldest/newest commit timestamp xid: %u/%u; "
 						 "oldest running xid %u; %s",
 						 LSN_FORMAT_ARGS(checkpoint->redo),
@@ -55,7 +55,6 @@ xlog_desc(StringInfo buf, XLogReaderState *record)
 						 checkpoint->fullPageWrites ? "true" : "false",
 						 EpochFromFullTransactionId(checkpoint->nextXid),
 						 XidFromFullTransactionId(checkpoint->nextXid),
-						 checkpoint->nextRelFileNumber,
 						 checkpoint->nextOid,
 						 checkpoint->nextMulti,
 						 checkpoint->nextMultiOffset,
@@ -75,13 +74,6 @@ xlog_desc(StringInfo buf, XLogReaderState *record)
 		memcpy(&nextOid, rec, sizeof(Oid));
 		appendStringInfo(buf, "%u", nextOid);
 	}
-	else if (info == XLOG_NEXT_RELFILENUMBER)
-	{
-		RelFileNumber nextRelFileNumber;
-
-		memcpy(&nextRelFileNumber, rec, sizeof(RelFileNumber));
-		appendStringInfo(buf, UINT64_FORMAT, nextRelFileNumber);
-	}
 	else if (info == XLOG_RESTORE_POINT)
 	{
 		xl_restore_point *xlrec = (xl_restore_point *) rec;
@@ -177,9 +169,6 @@ xlog_identify(uint8 info)
 		case XLOG_NEXTOID:
 			id = "NEXTOID";
 			break;
-		case XLOG_NEXT_RELFILENUMBER:
-			id = "NEXT_RELFILENUMBER";
-			break;
 		case XLOG_SWITCH:
 			id = "SWITCH";
 			break;
@@ -248,7 +237,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
 			appendStringInfoChar(buf, ' ');
 
 		appendStringInfo(buf,
-						 "blkref #%d: rel %u/%u/" UINT64_FORMAT " fork %s blk %u",
+						 "blkref #%d: rel %u/%u/%u fork %s blk %u",
 						 block_id,
 						 rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
 						 forkNames[forknum],
@@ -308,7 +297,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
 			if (forknum != MAIN_FORKNUM)
 			{
 				appendStringInfo(buf,
-								 ", blkref #%d: rel %u/%u/" UINT64_FORMAT " fork %s blk %u",
+								 ", blkref #%d: rel %u/%u/%u fork %s blk %u",
 								 block_id,
 								 rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
 								 forkNames[forknum],
@@ -317,7 +306,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
 			else
 			{
 				appendStringInfo(buf,
-								 ", blkref #%d: rel %u/%u/" UINT64_FORMAT " blk %u",
+								 ", blkref #%d: rel %u/%u/%u blk %u",
 								 block_id,
 								 rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
 								 blk);
diff --git a/src/backend/access/transam/README b/src/backend/access/transam/README
index 91c2578f7a9..72af6560600 100644
--- a/src/backend/access/transam/README
+++ b/src/backend/access/transam/README
@@ -692,9 +692,8 @@ by having database restart search for files that don't have any committed
 entry in pg_class, but that currently isn't done because of the possibility
 of deleting data that is useful for forensic analysis of the crash.
 Orphan files are harmless --- at worst they waste a bit of disk space ---
-because the relfilenumber counter is monotonically increasing. The maximum
-value is 2^56-1, and there is no provision for wraparound. Thus, on-disk
-collisions aren't possible.
+because we check for on-disk collisions when allocating new relfilenumber
+OIDs. So cleaning up isn't really necessary.
 
 3. Deleting a table, which requires an unlink() that could fail.
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 89da2f44590..849a7ce9d6d 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -13,16 +13,12 @@
 
 #include "postgres.h"
 
-#include <unistd.h>
-
 #include "access/clog.h"
 #include "access/commit_ts.h"
 #include "access/subtrans.h"
 #include "access/transam.h"
 #include "access/xact.h"
 #include "access/xlogutils.h"
-#include "catalog/pg_class.h"
-#include "catalog/pg_tablespace.h"
 #include "commands/dbcommands.h"
 #include "miscadmin.h"
 #include "postmaster/autovacuum.h"
@@ -34,15 +30,6 @@
 /* Number of OIDs to prefetch (preallocate) per XLOG write */
 #define VAR_OID_PREFETCH		8192
 
-/* Number of RelFileNumbers to be logged per XLOG write */
-#define VAR_RELNUMBER_PER_XLOG	512
-
-/*
- * Need to log more if remaining logged RelFileNumbers are less than the
- * threshold. Valid range could be between 0 to VAR_RELNUMBER_PER_XLOG - 1.
- */
-#define VAR_RELNUMBER_NEW_XLOG_THRESHOLD	256
-
 /* pointer to "variable cache" in shared memory (set up by shmem.c) */
 VariableCache ShmemVariableCache = NULL;
 
@@ -534,7 +521,8 @@ ForceTransactionIdLimitUpdate(void)
  * wide, counter wraparound will occur eventually, and therefore it is unwise
  * to assume they are unique unless precautions are taken to make them so.
  * Hence, this routine should generally not be used directly. The only direct
- * caller should be GetNewOidWithIndex() in catalog/catalog.c.
+ * callers should be GetNewOidWithIndex() and GetNewRelFileNumber() in
+ * catalog/catalog.c.
  */
 Oid
 GetNewObjectId(void)
@@ -625,199 +613,6 @@ SetNextObjectId(Oid nextOid)
 }
 
 /*
- * GetNewRelFileNumber
- *
- * Similar to GetNewObjectId but instead of new Oid it generates new
- * relfilenumber.
- */
-RelFileNumber
-GetNewRelFileNumber(Oid reltablespace, char relpersistence)
-{
-	RelFileNumber result;
-	RelFileNumber nextRelFileNumber,
-				loggedRelFileNumber,
-				flushedRelFileNumber;
-
-	StaticAssertStmt(VAR_RELNUMBER_NEW_XLOG_THRESHOLD < VAR_RELNUMBER_PER_XLOG,
-					 "VAR_RELNUMBER_NEW_XLOG_THRESHOLD must be smaller than VAR_RELNUMBER_PER_XLOG");
-
-	/* safety check, we should never get this far in a HS standby */
-	if (RecoveryInProgress())
-		elog(ERROR, "cannot assign RelFileNumber during recovery");
-
-	if (IsBinaryUpgrade)
-		elog(ERROR, "cannot assign RelFileNumber during binary upgrade");
-
-	LWLockAcquire(RelFileNumberGenLock, LW_EXCLUSIVE);
-
-	nextRelFileNumber = ShmemVariableCache->nextRelFileNumber;
-	loggedRelFileNumber = ShmemVariableCache->loggedRelFileNumber;
-	flushedRelFileNumber = ShmemVariableCache->flushedRelFileNumber;
-
-	Assert(nextRelFileNumber <= flushedRelFileNumber);
-	Assert(flushedRelFileNumber <= loggedRelFileNumber);
-
-	/* check for the wraparound for the relfilenumber counter */
-	if (unlikely(nextRelFileNumber > MAX_RELFILENUMBER))
-		elog(ERROR, "relfilenumber is too large");
-
-	/*
-	 * If the remaining logged relfilenumbers values are less than the
-	 * threshold value then log more.  Ideally, we can wait until all
-	 * relfilenumbers have been consumed before logging more.  Nevertheless, if
-	 * we do that, we must immediately flush the logged wal record because we
-	 * want to ensure that the nextRelFileNumber is always larger than any
-	 * relfilenumber already in use on disk.  And, to maintain that invariant,
-	 * we must make sure that the record we log reaches the disk before any new
-	 * files are created with the newly logged range.
-	 *
-	 * So in order to avoid flushing the wal immediately, we always log before
-	 * consuming all the relfilenumber, and now we only have to flush the newly
-	 * logged relfilenumber wal before consuming the relfilenumber from this
-	 * new range.  By the time we need to flush this wal, hopefully, those have
-	 * already been flushed with some other XLogFlush operation.
-	 */
-	if (loggedRelFileNumber - nextRelFileNumber <=
-		VAR_RELNUMBER_NEW_XLOG_THRESHOLD)
-	{
-		XLogRecPtr	recptr;
-
-		loggedRelFileNumber = loggedRelFileNumber + VAR_RELNUMBER_PER_XLOG;
-		recptr = LogNextRelFileNumber(loggedRelFileNumber);
-		ShmemVariableCache->loggedRelFileNumber = loggedRelFileNumber;
-
-		/* remember for the future flush */
-		ShmemVariableCache->loggedRelFileNumberRecPtr = recptr;
-	}
-
-	/*
-	 * If the nextRelFileNumber is already reached to the already flushed
-	 * relfilenumber then flush the WAL for previously logged relfilenumber.
-	 */
-	if (nextRelFileNumber >= flushedRelFileNumber)
-	{
-		XLogFlush(ShmemVariableCache->loggedRelFileNumberRecPtr);
-		ShmemVariableCache->flushedRelFileNumber = loggedRelFileNumber;
-	}
-
-	result = ShmemVariableCache->nextRelFileNumber;
-
-	/* we should never be using any relfilenumber outside the flushed range */
-	Assert(result <= ShmemVariableCache->flushedRelFileNumber);
-
-	(ShmemVariableCache->nextRelFileNumber)++;
-
-	LWLockRelease(RelFileNumberGenLock);
-
-	/*
-	 * Because the RelFileNumber counter only ever increases and never wraps
-	 * around, it should be impossible for the newly-allocated RelFileNumber to
-	 * already be in use.  But, if Asserts are enabled, double check that
-	 * there's no main-fork relation file with the new RelFileNumber already on
-	 * disk.
-	 */
-#ifdef USE_ASSERT_CHECKING
-	{
-		RelFileLocatorBackend rlocator;
-		char	   *rpath;
-		BackendId	backend;
-
-		switch (relpersistence)
-		{
-			case RELPERSISTENCE_TEMP:
-				backend = BackendIdForTempRelations();
-				break;
-			case RELPERSISTENCE_UNLOGGED:
-			case RELPERSISTENCE_PERMANENT:
-				backend = InvalidBackendId;
-				break;
-			default:
-				elog(ERROR, "invalid relpersistence: %c", relpersistence);
-		}
-
-		/* this logic should match RelationInitPhysicalAddr */
-		rlocator.locator.spcOid =
-			reltablespace ? reltablespace : MyDatabaseTableSpace;
-		rlocator.locator.dbOid = (reltablespace == GLOBALTABLESPACE_OID) ?
-			InvalidOid : MyDatabaseId;
-		rlocator.locator.relNumber = result;
-
-		/*
-		 * The relpath will vary based on the backend ID, so we must
-		 * initialize that properly here to make sure that any collisions
-		 * based on filename are properly detected.
-		 */
-		rlocator.backend = backend;
-
-		/* check for existing file of same name. */
-		rpath = relpath(rlocator, MAIN_FORKNUM);
-		Assert(access(rpath, F_OK) != 0);
-	}
-#endif
-
-	return result;
-}
-
-/*
- * SetNextRelFileNumber
- *
- * This may only be called during pg_upgrade; it advances the RelFileNumber
- * counter to the specified value if the current value is smaller than the
- * input value.
- */
-void
-SetNextRelFileNumber(RelFileNumber relnumber)
-{
-	/* safety check, we should never get this far in a HS standby */
-	if (RecoveryInProgress())
-		elog(ERROR, "cannot set RelFileNumber during recovery");
-
-	if (!IsBinaryUpgrade)
-		elog(ERROR, "RelFileNumber can be set only during binary upgrade");
-
-	LWLockAcquire(RelFileNumberGenLock, LW_EXCLUSIVE);
-
-	/*
-	 * If previous assigned value of the nextRelFileNumber is already higher
-	 * than the current value then nothing to be done.  This is possible
-	 * because during upgrade the objects are not created in relfilenumber
-	 * order.
-	 */
-	if (relnumber <= ShmemVariableCache->nextRelFileNumber)
-	{
-		LWLockRelease(RelFileNumberGenLock);
-		return;
-	}
-
-	/*
-	 * If the new relfilenumber to be set is greater than or equal to already
-	 * flushed relfilenumber then log more and flush immediately.
-	 *
-	 * (This is less efficient than GetNewRelFileNumber, which arranges to
-	 * log some new relfilenumbers before the old batch is exhausted in the
-	 * hope that a flush will happen in the background before any values are
-	 * needed from the new batch.  However, since this is only used during
-	 * binary upgrade, it shouldn't really matter.)
-	 */
-	if (relnumber >= ShmemVariableCache->flushedRelFileNumber)
-	{
-		RelFileNumber newlogrelnum;
-
-		newlogrelnum = relnumber + VAR_RELNUMBER_PER_XLOG;
-		XLogFlush(LogNextRelFileNumber(newlogrelnum));
-
-		/* we have flushed whatever we have logged so no pending flush */
-		ShmemVariableCache->loggedRelFileNumber = newlogrelnum;
-		ShmemVariableCache->flushedRelFileNumber = newlogrelnum;
-		ShmemVariableCache->loggedRelFileNumberRecPtr = InvalidXLogRecPtr;
-	}
-
-	ShmemVariableCache->nextRelFileNumber = relnumber;
-
-	LWLockRelease(RelFileNumberGenLock);
-}
-
-/*
  * StopGeneratingPinnedObjectIds
  *
  * This is called once during initdb to force the OID counter up to
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 35fac945cb1..00992a11b9e 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -4712,7 +4712,6 @@ BootStrapXLOG(void)
 	checkPoint.nextXid =
 		FullTransactionIdFromEpochAndXid(0, FirstNormalTransactionId);
 	checkPoint.nextOid = FirstGenbkiObjectId;
-	checkPoint.nextRelFileNumber = FirstNormalRelFileNumber;
 	checkPoint.nextMulti = FirstMultiXactId;
 	checkPoint.nextMultiOffset = 0;
 	checkPoint.oldestXid = FirstNormalTransactionId;
@@ -4726,11 +4725,7 @@ BootStrapXLOG(void)
 
 	ShmemVariableCache->nextXid = checkPoint.nextXid;
 	ShmemVariableCache->nextOid = checkPoint.nextOid;
-	ShmemVariableCache->nextRelFileNumber = checkPoint.nextRelFileNumber;
 	ShmemVariableCache->oidCount = 0;
-	ShmemVariableCache->loggedRelFileNumber = checkPoint.nextRelFileNumber;
-	ShmemVariableCache->flushedRelFileNumber = checkPoint.nextRelFileNumber;
-	ShmemVariableCache->loggedRelFileNumberRecPtr = InvalidXLogRecPtr;
 	MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
 	AdvanceOldestClogXid(checkPoint.oldestXid);
 	SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
@@ -5196,10 +5191,7 @@ StartupXLOG(void)
 
 	/* initialize shared memory variables from the checkpoint record */
 	ShmemVariableCache->nextXid = checkPoint.nextXid;
 	ShmemVariableCache->nextOid = checkPoint.nextOid;
-	ShmemVariableCache->nextRelFileNumber = checkPoint.nextRelFileNumber;
 	ShmemVariableCache->oidCount = 0;
-	ShmemVariableCache->loggedRelFileNumber = checkPoint.nextRelFileNumber;
-	ShmemVariableCache->flushedRelFileNumber = checkPoint.nextRelFileNumber;
 	MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
 	AdvanceOldestClogXid(checkPoint.oldestXid);
 	SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
@@ -6671,24 +6663,6 @@ CreateCheckPoint(int flags)
 	checkPoint.nextOid += ShmemVariableCache->oidCount;
 	LWLockRelease(OidGenLock);
 
-	/*
-	 * If this is a shutdown checkpoint then we can safely start allocating
-	 * relfilenumber from the nextRelFileNumber value after the restart because
-	 * no one one else can use the relfilenumber beyond that number before the
-	 * shutdown. OTOH, if it is a normal checkpoint then if there is a crash
-	 * after this point then we might end up reusing the same relfilenumbers
-	 * after the restart so we need to set the nextRelFileNumber to the already
-	 * logged relfilenumber as no one will use number beyond this limit without
-	 * logging again.
-	 */
-	LWLockAcquire(RelFileNumberGenLock, LW_SHARED);
-	if (shutdown)
-		checkPoint.nextRelFileNumber = ShmemVariableCache->nextRelFileNumber;
-	else
-		checkPoint.nextRelFileNumber = ShmemVariableCache->loggedRelFileNumber;
-
-	LWLockRelease(RelFileNumberGenLock);
-
 	MultiXactGetCheckptMulti(shutdown,
 							 &checkPoint.nextMulti,
 							 &checkPoint.nextMultiOffset,
@@ -7567,24 +7541,6 @@ XLogPutNextOid(Oid nextOid)
 }
 
 /*
- * Similar to the XLogPutNextOid but instead of writing NEXTOID log record it
- * writes a NEXT_RELFILENUMBER log record.  It also returns the XLogRecPtr of
- * the currently logged relfilenumber record, so that the caller can flush it
- * at the appropriate time.
- */
-XLogRecPtr
-LogNextRelFileNumber(RelFileNumber nextrelnumber)
-{
-	XLogRecPtr	recptr;
-
-	XLogBeginInsert();
-	XLogRegisterData((char *) (&nextrelnumber), sizeof(RelFileNumber));
-	recptr = XLogInsert(RM_XLOG_ID, XLOG_NEXT_RELFILENUMBER);
-
-	return recptr;
-}
-
-/*
  * Write an XLOG SWITCH record.
  *
  * Here we just blindly issue an XLogInsert request for the record.
@@ -7799,17 +7755,6 @@ xlog_redo(XLogReaderState *record)
 		ShmemVariableCache->oidCount = 0;
 		LWLockRelease(OidGenLock);
 	}
-	if (info == XLOG_NEXT_RELFILENUMBER)
-	{
-		RelFileNumber nextRelFileNumber;
-
-		memcpy(&nextRelFileNumber, XLogRecGetData(record), sizeof(RelFileNumber));
-		LWLockAcquire(RelFileNumberGenLock, LW_EXCLUSIVE);
-		ShmemVariableCache->nextRelFileNumber = nextRelFileNumber;
-		ShmemVariableCache->loggedRelFileNumber = nextRelFileNumber;
-		ShmemVariableCache->flushedRelFileNumber = nextRelFileNumber;
-		LWLockRelease(RelFileNumberGenLock);
-	}
 	else if (info == XLOG_CHECKPOINT_SHUTDOWN)
 	{
 		CheckPoint	checkPoint;
@@ -7824,11 +7769,6 @@ xlog_redo(XLogReaderState *record)
 		ShmemVariableCache->nextOid = checkPoint.nextOid;
 		ShmemVariableCache->oidCount = 0;
 		LWLockRelease(OidGenLock);
-		LWLockAcquire(RelFileNumberGenLock, LW_EXCLUSIVE);
-		ShmemVariableCache->nextRelFileNumber = checkPoint.nextRelFileNumber;
-		ShmemVariableCache->loggedRelFileNumber = checkPoint.nextRelFileNumber;
-		ShmemVariableCache->flushedRelFileNumber = checkPoint.nextRelFileNumber;
-		LWLockRelease(RelFileNumberGenLock);
 		MultiXactSetNextMXact(checkPoint.nextMulti,
 							  checkPoint.nextMultiOffset);
diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c
index cea38eccea6..8f5d4253320 100644
--- a/src/backend/access/transam/xlogprefetcher.c
+++ b/src/backend/access/transam/xlogprefetcher.c
@@ -613,7 +613,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
 
 #ifdef XLOGPREFETCHER_DEBUG_LEVEL
 				elog(XLOGPREFETCHER_DEBUG_LEVEL,
-					 "suppressing prefetch in relation %u/%u/" UINT64_FORMAT " until %X/%X is replayed, which creates the relation",
+					 "suppressing prefetch in relation %u/%u/%u until %X/%X is replayed, which creates the relation",
 					 xlrec->rlocator.spcOid,
 					 xlrec->rlocator.dbOid,
 					 xlrec->rlocator.relNumber,
@@ -636,7 +636,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
 
 #ifdef XLOGPREFETCHER_DEBUG_LEVEL
 				elog(XLOGPREFETCHER_DEBUG_LEVEL,
-					 "suppressing prefetch in relation %u/%u/" UINT64_FORMAT " from block %u until %X/%X is replayed, which truncates the relation",
+					 "suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, which truncates the relation",
 					 xlrec->rlocator.spcOid,
 					 xlrec->rlocator.dbOid,
 					 xlrec->rlocator.relNumber,
@@ -735,7 +735,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
 			{
 #ifdef XLOGPREFETCHER_DEBUG_LEVEL
 				elog(XLOGPREFETCHER_DEBUG_LEVEL,
-					 "suppressing all prefetch in relation %u/%u/" UINT64_FORMAT " until %X/%X is replayed, because the relation does not exist on disk",
+					 "suppressing all prefetch in relation %u/%u/%u until %X/%X is replayed, because the relation does not exist on disk",
 					 reln->smgr_rlocator.locator.spcOid,
 					 reln->smgr_rlocator.locator.dbOid,
 					 reln->smgr_rlocator.locator.relNumber,
@@ -756,7 +756,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
 			{
 #ifdef XLOGPREFETCHER_DEBUG_LEVEL
 				elog(XLOGPREFETCHER_DEBUG_LEVEL,
-					 "suppressing prefetch in relation %u/%u/" UINT64_FORMAT " from block %u until %X/%X is replayed, because the relation is too small",
+					 "suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, because the relation is too small",
 					 reln->smgr_rlocator.locator.spcOid,
 					 reln->smgr_rlocator.locator.dbOid,
 					 reln->smgr_rlocator.locator.relNumber,
@@ -795,7 +795,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
 		 * truncated beneath our feet?
 		 */
 		elog(ERROR,
-			 "could not prefetch relation %u/%u/" UINT64_FORMAT " block %u",
+			 "could not prefetch relation %u/%u/%u block %u",
 			 reln->smgr_rlocator.locator.spcOid,
 			 reln->smgr_rlocator.locator.dbOid,
 			 reln->smgr_rlocator.locator.relNumber,
@@ -934,7 +934,7 @@ XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
 		{
 #ifdef XLOGPREFETCHER_DEBUG_LEVEL
 			elog(XLOGPREFETCHER_DEBUG_LEVEL,
-				 "prefetch of %u/%u/" UINT64_FORMAT " block %u suppressed; filtering until LSN %X/%X is replayed (blocks >= %u filtered)",
+				 "prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (blocks >= %u filtered)",
 				 rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
 				 LSN_FORMAT_ARGS(filter->filter_until_replayed),
 				 filter->filter_from_block);
@@ -950,7 +950,7 @@ XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
 		{
 #ifdef XLOGPREFETCHER_DEBUG_LEVEL
 			elog(XLOGPREFETCHER_DEBUG_LEVEL,
-				 "prefetch of %u/%u/" UINT64_FORMAT " block %u suppressed; filtering until LSN %X/%X is replayed (whole database)",
+				 "prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (whole database)",
 				 rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
 				 LSN_FORMAT_ARGS(filter->filter_until_replayed));
 #endif
diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c
index 1026ce5dcf7..b41e6826643 100644
--- a/src/backend/access/transam/xlogrecovery.c
+++ b/src/backend/access/transam/xlogrecovery.c
@@ -2228,14 +2228,14 @@ xlog_block_info(StringInfo buf, XLogReaderState *record)
 			continue;
 
 		if (forknum != MAIN_FORKNUM)
-			appendStringInfo(buf, "; blkref #%d: rel %u/%u/" UINT64_FORMAT ", fork %u, blk %u",
+			appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, fork %u, blk %u",
 							 block_id,
 							 rlocator.spcOid, rlocator.dbOid,
 							 rlocator.relNumber,
 							 forknum,
 							 blk);
 		else
-			appendStringInfo(buf, "; blkref #%d: rel %u/%u/" UINT64_FORMAT ", blk %u",
+			appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, blk %u",
 							 block_id,
 							 rlocator.spcOid, rlocator.dbOid,
 							 rlocator.relNumber,
@@ -2433,7 +2433,7 @@ verifyBackupPageConsistency(XLogReaderState *record)
 		if (memcmp(replay_image_masked, primary_image_masked, BLCKSZ) != 0)
 		{
 			elog(FATAL,
-				 "inconsistent page found, rel %u/%u/" UINT64_FORMAT ", forknum %u, blkno %u",
+				 "inconsistent page found, rel %u/%u/%u, forknum %u, blkno %u",
 				 rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
 				 forknum, blkno);
 		}
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index ffda2c210b7..563cba258dd 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -619,17 +619,17 @@ CreateFakeRelcacheEntry(RelFileLocator rlocator)
 	rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
 
 	/* We don't know the name of the relation; use relfilenumber instead */
-	sprintf(RelationGetRelationName(rel), UINT64_FORMAT, rlocator.relNumber);
+	sprintf(RelationGetRelationName(rel), "%u", rlocator.relNumber);
 
 	/*
 	 * We set up the lockRelId in case anything tries to lock the dummy
-	 * relation.  Note that this is fairly bogus since relNumber are completely
+	 * relation.  Note that this is fairly bogus since relNumber may be
 	 * different from the relation's OID.  It shouldn't really matter though.
 	 * In recovery, we are running by ourselves and can't have any lock
 	 * conflicts.  While syncing, we already hold AccessExclusiveLock.
 	 */
 	rel->rd_lockInfo.lockRelId.dbId = rlocator.dbOid;
-	rel->rd_lockInfo.lockRelId.relId = (Oid) rlocator.relNumber;
+	rel->rd_lockInfo.lockRelId.relId = rlocator.relNumber;
 
 	rel->rd_smgr = NULL;
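
Beyond the format-string changes, the bulk of the revert removes the log-ahead allocation scheme in GetNewRelFileNumber(): log a new range of relfilenumbers before the current one runs out, remember where that record was written, and flush it before handing out any value past the last durable bound, so that after a crash the counter restarts beyond every relfilenumber already on disk. The sketch below is a simplified, hypothetical rendering of that pattern only; the names are stand-ins, the stubs replace LogNextRelFileNumber() and XLogFlush(), and there is no locking or real WAL.

/*
 * Hypothetical sketch of the "log ahead, flush before use" counter pattern
 * implemented by the reverted GetNewRelFileNumber(); illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define PER_LOG    512          /* values covered by one "log" record */
#define THRESHOLD  256          /* re-log when this few logged values remain */

static uint64_t next_val = 1;   /* next value to hand out */
static uint64_t logged   = 1;   /* highest value covered by a written record */
static uint64_t flushed  = 1;   /* highest value whose record is durable */
static uint64_t pending_record; /* stand-in for the remembered XLogRecPtr */

static void
write_log_record(uint64_t upto)   /* stub for LogNextRelFileNumber() */
{
    pending_record = upto;
}

static void
flush_log(void)                   /* stub for XLogFlush() */
{
    flushed = pending_record;
}

static uint64_t
allocate(void)
{
    /* Log a new range before the old one is exhausted, so the flush can
     * usually happen in the background before any value from it is needed. */
    if (logged - next_val <= THRESHOLD)
    {
        logged += PER_LOG;
        write_log_record(logged);
    }

    /* Never hand out a value beyond what is known durable; after a crash the
     * counter restarts from the logged value, so reuse would be possible. */
    if (next_val >= flushed)
        flush_log();

    return next_val++;
}

int
main(void)
{
    for (int i = 0; i < 1000; i++)
        allocate();
    printf("next=%llu logged=%llu flushed=%llu\n",
           (unsigned long long) next_val,
           (unsigned long long) logged,
           (unsigned long long) flushed);
    return 0;
}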