From 31568e17fd44abbedd0b477692f31d80926063d3 Mon Sep 17 00:00:00 2001
From: Simon Riggs
Date: Mon, 5 Oct 2009 11:18:03 +0100
Subject: [PATCH] Apply changes from Heikki's patch 0002-Comment-whitespace.. etc

It contained some code changes as well, all very minor, all approved.
---
 src/backend/access/nbtree/README    |  6 +++---
 src/backend/access/nbtree/nbtpage.c |  3 +--
 src/backend/access/nbtree/nbtxlog.c | 14 +++++++-------
 src/backend/storage/ipc/procarray.c |  2 +-
 src/include/storage/lock.h          |  1 -
 src/include/utils/snapshot.h        |  2 +-
 6 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README
index c58d2fcaf3..8739d9ce12 100644
--- a/src/backend/access/nbtree/README
+++ b/src/backend/access/nbtree/README
@@ -410,7 +410,7 @@ situation the locking requirements can be relaxed and we do not need
 double locking during block splits. Each WAL record makes changes to a
 single level of the btree using the correct locking sequence and so is
 safe for concurrent readers. Some readers may observe a block split
-in progress as they descend the tree, but they will simple move right
+in progress as they descend the tree, but they will simply move right
 onto the correct page.
 
 During recovery all index scans start with ignore_killed_tuples = false
@@ -419,8 +419,8 @@ on the standby server can be older than the oldest xmin on the master
 server, which means tuples can be marked as killed even when they are
 still visible on the standby. We don't WAL log tuple killed bits, but
 they can still appear in the standby because of full page writes. So
-we must always ignore them and that means it's not worth setting them
-either.
+we must always ignore them in standby, and that means it's not worth
+setting them either.
 
 Other Things That Are Handy to Know
 -----------------------------------
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index eefa888f6c..04958709f2 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -719,8 +719,7 @@ _bt_delitems(Relation rel, Buffer buf,
 
 	/*
 	 * We would like to set an accurate latestRemovedXid, but there
-	 * is no easy way of obtaining a useful value. So we use the
-	 * probably far too conservative value of RecentGlobalXmin instead.
+	 * is no easy way of obtaining a useful value.
 	 */
 	xlrec_delete.latestRemovedXid = InvalidTransactionId;
 	rdata[0].data = (char *) &xlrec_delete;
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 864c41c6be..50d6ce25ca 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -503,8 +503,8 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
 	}
 
 	/*
-	 * We need to take a cleanup lock to apply these changes.
-	 * See nbtree/README for details.
+	 * Like in btvacuumpage(), we need to take a cleanup lock on every leaf
+	 * page. See nbtree/README for details.
 	 */
 	buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM, xlrec->block, RBM_NORMAL);
 	if (!BufferIsValid(buffer))
@@ -805,11 +805,11 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
 
 	/*
 	 * Btree delete records can conflict with standby queries. You might
-	 * think that vacuum records would conflict as well, but they don't.
-	 * XLOG_HEAP2_CLEANUP_INFO records provide the highest xid cleaned
-	 * by the vacuum of the heap and so we can resolve any conflicts just
-	 * once when that arrives. After that any we know that no conflicts exist
-	 * from individual btree vacuum records on that index.
+	 * think that vacuum records would conflict as well, but we've handled
+	 * that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
+	 * cleaned by the vacuum of the heap and so we can resolve any conflicts
+	 * just once when that arrives. After that we know that no conflicts
+	 * exist from individual btree vacuum records on that index.
 	 */
 	if (InHotStandby)
 	{
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 9d3f29e90f..3cb71f3116 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1232,7 +1232,7 @@ void
 GetRunningTransactionData(void)
 {
 	ProcArrayStruct *arrayP = procArray;
-	static RunningTransactions CurrentRunningXacts = (RunningTransactions) &CurrentRunningXactsData;
+	RunningTransactions CurrentRunningXacts = (RunningTransactions) &CurrentRunningXactsData;
 	RunningXact *rxact;
 	TransactionId *subxip;
 	TransactionId latestRunningXid = InvalidTransactionId;
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index f674547672..cbf3c9831e 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -294,7 +294,6 @@ typedef struct LOCKTAG
  * nRequested -- total requested locks of all types.
  * granted -- count of each lock type currently granted on the lock.
  * nGranted -- total granted locks of all types.
- * xid -- xid of current/only lock holder for use by GetLockStatusData()
  *
  * Note: these counts count 1 for each backend. Internally to a backend,
 * there may be multiple grabs on a particular lock, but this is not reflected
diff --git a/src/include/utils/snapshot.h b/src/include/utils/snapshot.h
index 92feaf827b..d0171117e0 100644
--- a/src/include/utils/snapshot.h
+++ b/src/include/utils/snapshot.h
@@ -51,7 +51,7 @@ typedef struct SnapshotData
 	/* note: all ids in xip[] satisfy xmin <= xip[i] < xmax */
 	int32		subxcnt;		/* # of xact ids in subxip[], -1 if overflow */
 	TransactionId *subxip;		/* array of subxact IDs in progress */
-	bool		takenDuringRecovery;	/* Recovery-shaped snapshot? */
+	bool		takenDuringRecovery;	/* recovery-shaped snapshot? */
 	/*
 	 * note: all ids in subxip[] are >= xmin, but we don't bother filtering
 	 * out any that are >= xmax
-- 
2.39.5
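
The btree_redo comment above encodes the design choice behind the vacuum
records: conflict resolution happens once per heap vacuum, when the
XLOG_HEAP2_CLEANUP_INFO record arrives, so the many per-page
XLOG_BTREE_VACUUM records can replay without any conflict checks of their
own. A minimal sketch of that dispatch shape follows. It is an illustration
of the scheme, not the patch's own code: the resolver name
ResolveRecoveryConflictWithSnapshot and its header follow the Hot Standby
code as eventually committed, and btree_xlog_delete()/btree_xlog_vacuum()
stand for the static per-record replay routines in nbtxlog.c. Note also that
_bt_delitems above currently logs latestRemovedXid = InvalidTransactionId,
which a resolver has to treat conservatively.

#include "postgres.h"

#include "access/nbtree.h"
#include "access/xlog.h"
#include "storage/standby.h"	/* assumed: declares ResolveRecoveryConflictWithSnapshot */

void
btree_redo(XLogRecPtr lsn, XLogRecord *record)
{
	uint8		info = record->xl_info & ~XLR_INFO_MASK;

	switch (info)
	{
		case XLOG_BTREE_DELETE:
			/*
			 * A delete record can remove tuples that a standby snapshot
			 * still considers visible, so resolve the conflict before
			 * replaying the change.
			 */
			if (InHotStandby)
			{
				xl_btree_delete *xlrec = (xl_btree_delete *) XLogRecGetData(record);

				ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid,
													xlrec->node);
			}
			btree_xlog_delete(lsn, record);
			break;

		case XLOG_BTREE_VACUUM:
			/*
			 * No conflict check here: the XLOG_HEAP2_CLEANUP_INFO record
			 * emitted before the heap was vacuumed already carried the
			 * highest removed xid, so any conflict was resolved once, up
			 * front, for the whole vacuum.
			 */
			btree_xlog_vacuum(lsn, record);
			break;

		/* ... remaining btree record types dispatch as before ... */

		default:
			elog(PANIC, "btree_redo: unknown op code %u", info);
	}
}

The payoff is on the replay side: an index vacuum can generate one WAL
record per leaf page it cleans, and checking each of them for conflicts
would be wasted work once the CLEANUP_INFO record has already conflicted
out every query older than the heap vacuum's cutoff.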