Diffstat (limited to 'src/backend')
-rw-r--r-- | src/backend/access/heap/heapam.c | 2
-rw-r--r-- | src/backend/access/transam/README | 11
-rw-r--r-- | src/backend/access/transam/varsup.c | 13
-rw-r--r-- | src/backend/commands/async.c | 11
-rw-r--r-- | src/backend/commands/vacuum.c | 10
-rw-r--r-- | src/backend/storage/buffer/bufmgr.c | 2
6 files changed, 22 insertions, 27 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f75e1cf0e7b..9b5f417eac4 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6920,8 +6920,6 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
 	 * updated/deleted by the inserting transaction.
 	 *
 	 * Look for a committed hint bit, or if no xmin bit is set, check clog.
-	 * This needs to work on both primary and standby, where it is used to
-	 * assess btree delete records.
 	 */
 	if (HeapTupleHeaderXminCommitted(tuple) ||
 		(!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
diff --git a/src/backend/access/transam/README b/src/backend/access/transam/README
index c5f09667ba1..1edc8180c12 100644
--- a/src/backend/access/transam/README
+++ b/src/backend/access/transam/README
@@ -635,12 +635,11 @@ be reconstructed later following a crash and the action is simply a way
 of optimising for performance.  When a hint is written we use
 MarkBufferDirtyHint() to mark the block dirty.
 
-If the buffer is clean and checksums are in use then
-MarkBufferDirtyHint() inserts an XLOG_FPI record to ensure that we
-take a full page image that includes the hint.  We do this to avoid
-a partial page write, when we write the dirtied page.  WAL is not
-written during recovery, so we simply skip dirtying blocks because
-of hints when in recovery.
+If the buffer is clean and checksums are in use then MarkBufferDirtyHint()
+inserts an XLOG_FPI_FOR_HINT record to ensure that we take a full page image
+that includes the hint.  We do this to avoid a partial page write, when we
+write the dirtied page.  WAL is not written during recovery, so we simply skip
+dirtying blocks because of hints when in recovery.
 
 If you do decide to optimise away a WAL record, then any calls to
 MarkBufferDirty() must be replaced by MarkBufferDirtyHint(),
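
The README paragraph above describes the calling convention for hint writes.
As a rough illustration (not part of this commit), a hypothetical hint-setting
helper in backend C would follow the pattern below; the helper name is
invented here, and the comparable in-tree logic lives in SetHintBits() in the
heap visibility code.

    /*
     * Illustrative sketch only.  A hint write marks the buffer dirty with
     * MarkBufferDirtyHint(), never MarkBufferDirty(), because the caller
     * writes no WAL of its own.
     */
    #include "postgres.h"

    #include "access/htup_details.h"
    #include "storage/bufmgr.h"

    /* Hypothetical helper: set the "xmin committed" hint on a tuple. */
    static void
    set_xmin_committed_hint(HeapTupleHeader tuple, Buffer buffer)
    {
        tuple->t_infomask |= HEAP_XMIN_COMMITTED;

        /*
         * MarkBufferDirtyHint() does the rest: if checksums are enabled and
         * the buffer is still clean, it emits an XLOG_FPI_FOR_HINT full-page
         * image so a torn page cannot leave a bad checksum; during recovery
         * it skips dirtying the block, since no WAL may be written then.
         */
        MarkBufferDirtyHint(buffer, true);
    }

The second argument to MarkBufferDirtyHint() says whether the page follows the
standard layout, which lets the full-page image omit the hole between pd_lower
and pd_upper.
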
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 2d2b05be36c..a4944faa32e 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -367,12 +367,13 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
 	 * We'll refuse to continue assigning XIDs in interactive mode once we get
 	 * within 3M transactions of data loss.  This leaves lots of room for the
 	 * DBA to fool around fixing things in a standalone backend, while not
-	 * being significant compared to total XID space. (Note that since
-	 * vacuuming requires one transaction per table cleaned, we had better be
-	 * sure there's lots of XIDs left...)  Also, at default BLCKSZ, this
-	 * leaves two completely-idle segments.  In the event of edge-case bugs
-	 * involving page or segment arithmetic, idle segments render the bugs
-	 * unreachable outside of single-user mode.
+	 * being significant compared to total XID space. (VACUUM requires an XID
+	 * if it truncates at wal_level!=minimal.  "VACUUM (ANALYZE)", which a DBA
+	 * might do by reflex, assigns an XID.  Hence, we had better be sure
+	 * there's lots of XIDs left...)  Also, at default BLCKSZ, this leaves two
+	 * completely-idle segments.  In the event of edge-case bugs involving
+	 * page or segment arithmetic, idle segments render the bugs unreachable
+	 * outside of single-user mode.
 	 */
 	xidStopLimit = xidWrapLimit - 3000000;
 	if (xidStopLimit < FirstNormalTransactionId)
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 4c1286eb988..774b26fd2c4 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -302,13 +302,10 @@ static SlruCtlData NotifyCtlData;
 #define QUEUE_FULL_WARN_INTERVAL	5000	/* warn at most once every 5s */
 
 /*
- * slru.c currently assumes that all filenames are four characters of hex
- * digits. That means that we can use segments 0000 through FFFF.
- * Each segment contains SLRU_PAGES_PER_SEGMENT pages which gives us
- * the pages from 0 to SLRU_PAGES_PER_SEGMENT * 0x10000 - 1.
- *
- * It's of course possible to enhance slru.c, but this gives us so much
- * space already that it doesn't seem worth the trouble.
+ * Use segments 0000 through FFFF.  Each contains SLRU_PAGES_PER_SEGMENT pages
+ * which gives us the pages from 0 to SLRU_PAGES_PER_SEGMENT * 0x10000 - 1.
+ * We could use as many segments as SlruScanDirectory() allows, but this gives
+ * us so much space already that it doesn't seem worth the trouble.
  *
  * The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2
  * pages, because more than that would confuse slru.c into thinking there
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 5189a5ad5e3..23eb605d4cb 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -949,11 +949,11 @@ vacuum_set_xid_limits(Relation rel,
 
 	/*
 	 * We can always ignore processes running lazy vacuum.  This is because we
 	 * use these values only for deciding which tuples we must keep in the
-	 * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to
-	 * ignore it. In theory it could be problematic to ignore lazy vacuums in
-	 * a full vacuum, but keep in mind that only one vacuum process can be
-	 * working on a particular table at any time, and that each vacuum is
-	 * always an independent transaction.
+	 * tables.  Since lazy vacuum doesn't write its XID anywhere (usually no
+	 * XID assigned), it's safe to ignore it.  In theory it could be
+	 * problematic to ignore lazy vacuums in a full vacuum, but keep in mind
+	 * that only one vacuum process can be working on a particular table at
+	 * any time, and that each vacuum is always an independent transaction.
 	 */
 	*oldestXmin = GetOldestNonRemovableTransactionId(rel);
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index f1ae6f9f844..a2a963bd5b4 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -3578,7 +3578,7 @@ IncrBufferRefCount(Buffer buffer)
  * This is essentially the same as MarkBufferDirty, except:
  *
  * 1. The caller does not write WAL; so if checksums are enabled, we may need
- *    to write an XLOG_FPI WAL record to protect against torn pages.
+ *    to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
  * 2. The caller might have only share-lock instead of exclusive-lock on the
  *    buffer's content lock.
  * 3. This function does not guarantee that the buffer is always marked dirty
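
For the varsup.c hunk above, the claim that the 3M-XID stop margin leaves two
completely-idle segments at default BLCKSZ can be checked with a small
standalone program; the clog constants used here (2 status bits per
transaction, 32 pages per SLRU segment) are assumptions taken from the default
build, not from this diff.

    /*
     * Standalone sketch of the "two completely-idle segments" arithmetic in
     * the varsup.c comment.  Assumes default BLCKSZ (8192), 2 clog status
     * bits per transaction (4 xacts/byte), and 32 pages per SLRU segment.
     */
    #include <stdio.h>

    int
    main(void)
    {
        const long blcksz = 8192;
        const long xacts_per_page = blcksz * 4;     /* 32768 */
        const long pages_per_segment = 32;
        const long xacts_per_segment = xacts_per_page * pages_per_segment; /* 1048576 */
        const long stop_margin = 3000000;           /* xidWrapLimit - xidStopLimit */

        printf("xacts per clog segment: %ld\n", xacts_per_segment);
        printf("segments spanned by the 3M margin: %.2f\n",
               (double) stop_margin / xacts_per_segment);
        /* ~2.86 segments, so at least two whole clog segments stay idle
         * before the wrap limit can be reached outside single-user mode. */
        return 0;
    }
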
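In the same spirit, the page range in the async.c hunk works out as below;
again an illustrative standalone sketch, with the 8 kB page size and 32 pages
per SLRU segment assumed from the default build rather than taken from this
diff.

    /*
     * Standalone sketch of the notify-queue sizing described in async.c:
     * segments 0000..FFFF, SLRU_PAGES_PER_SEGMENT pages each, and at most
     * QUEUE_MAX_PAGE/2 pages of data in the queue at once.
     */
    #include <stdio.h>

    int
    main(void)
    {
        const long pages_per_segment = 32;      /* SLRU_PAGES_PER_SEGMENT */
        const long segments = 0x10000;          /* 0000 through FFFF */
        const long max_page = pages_per_segment * segments - 1; /* QUEUE_MAX_PAGE */
        const long page_size = 8192;            /* default BLCKSZ */

        printf("usable pages: 0 .. %ld\n", max_page);
        printf("max queued data: about %.1f GB (QUEUE_MAX_PAGE/2 pages)\n",
               (double) (max_page / 2) * page_size / (1024.0 * 1024 * 1024));
        return 0;
    }
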