author     Robert Haas    2016-06-09 22:02:36 +0000
committer  Robert Haas    2016-06-09 22:02:36 +0000
commit     4bc424b968058c7f0aa685821d7039e86faac99c (patch)
tree       a4e245ae67bd11edb3926ff5fb3b0223438ac283 /src/backend/storage
parent     9164deea2f4ac90ee5e008ff41fc5ad4423887b2 (diff)
pgindent run for 9.6
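pgindent is PostgreSQL's automated source formatter, and the hunks below are typical of what a run changes: multi-line comments are re-filled to the project's standard width and local variable declarations are re-aligned, with no change in behavior. As an illustration of the reflow, here is the kind of before/after transformation visible in the freespace.c hunk further down (shown only as an example; the diff itself is authoritative):

    /* before: comment wrapped short of the fill column, declaration unaligned */
    /*
     * Get the last block number on this FSM page. If that's greater
     * than or equal to our endBlkNum, we're done.
     */
    BlockNumber lastBlkOnPage;

    /* after: comment re-filled to the standard width, declaration aligned */
    /*
     * Get the last block number on this FSM page.  If that's greater than
     * or equal to our endBlkNum, we're done.
     */
    BlockNumber	lastBlkOnPage;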
Diffstat (limited to 'src/backend/storage')
-rw-r--r--  src/backend/storage/buffer/buf_init.c      11
-rw-r--r--  src/backend/storage/buffer/bufmgr.c         4
-rw-r--r--  src/backend/storage/freespace/freespace.c  20
-rw-r--r--  src/backend/storage/ipc/dsm.c               4
-rw-r--r--  src/backend/storage/ipc/procarray.c        16
-rw-r--r--  src/backend/storage/ipc/shm_mq.c            4
-rw-r--r--  src/backend/storage/ipc/standby.c           1
-rw-r--r--  src/backend/storage/lmgr/lock.c            34
-rw-r--r--  src/backend/storage/lmgr/lwlock.c          36
-rw-r--r--  src/backend/storage/lmgr/proc.c             8
10 files changed, 70 insertions, 68 deletions
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index 5804870ad48..a4163cf717d 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -187,11 +187,12 @@ BufferShmemSize(void)
/*
* It would be nice to include the I/O locks in the BufferDesc, but that
- * would increase the size of a BufferDesc to more than one cache line, and
- * benchmarking has shown that keeping every BufferDesc aligned on a cache
- * line boundary is important for performance. So, instead, the array of
- * I/O locks is allocated in a separate tranche. Because those locks are
- * not highly contentended, we lay out the array with minimal padding.
+ * would increase the size of a BufferDesc to more than one cache line,
+ * and benchmarking has shown that keeping every BufferDesc aligned on a
+ * cache line boundary is important for performance. So, instead, the
+ * array of I/O locks is allocated in a separate tranche. Because those
+ * locks are not highly contentended, we lay out the array with minimal
+ * padding.
*/
size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
/* to allow aligning the above */
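The comment above explains why the buffer I/O locks live in their own LWLock tranche rather than inside each BufferDesc. A minimal sketch of the shared-memory sizing that choice implies, using the standard shmem helpers add_size/mul_size and PG_CACHE_LINE_SIZE; the wrapper function and its name are illustrative, not code from this commit:

    /* one minimally padded lock per buffer, plus slack so the array itself
     * can be cache-line aligned; BufferDescs keep their own full padding */
    static Size
    io_lock_tranche_size(int nbuffers)
    {
        Size    size = mul_size(nbuffers, sizeof(LWLockMinimallyPadded));

        return add_size(size, PG_CACHE_LINE_SIZE);
    }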
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 8a830d4f21d..59a8a85dfcd 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -4291,8 +4291,8 @@ void
TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
{
if (!IsCatalogRelation(relation)
- && !RelationIsAccessibleInLogicalDecoding(relation)
- && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
+ && !RelationIsAccessibleInLogicalDecoding(relation)
+ && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
ereport(ERROR,
(errcode(ERRCODE_SNAPSHOT_TOO_OLD),
errmsg("snapshot too old")));
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index 2ffa8ff24d0..bbd90c911aa 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -199,13 +199,13 @@ RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
*/
void
UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum,
- BlockNumber endBlkNum, Size freespace)
+ BlockNumber endBlkNum, Size freespace)
{
int new_cat = fsm_space_avail_to_cat(freespace);
FSMAddress addr;
uint16 slot;
- BlockNumber blockNum;
- BlockNumber lastBlkOnPage;
+ BlockNumber blockNum;
+ BlockNumber lastBlkOnPage;
blockNum = startBlkNum;
@@ -219,9 +219,9 @@ UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum,
fsm_update_recursive(rel, addr, new_cat);
/*
- * Get the last block number on this FSM page. If that's greater
- * than or equal to our endBlkNum, we're done. Otherwise, advance
- * to the first block on the next page.
+ * Get the last block number on this FSM page. If that's greater than
+ * or equal to our endBlkNum, we're done. Otherwise, advance to the
+ * first block on the next page.
*/
lastBlkOnPage = fsm_get_lastblckno(rel, addr);
if (lastBlkOnPage >= endBlkNum)
@@ -841,8 +841,8 @@ fsm_get_lastblckno(Relation rel, FSMAddress addr)
int slot;
/*
- * Get the last slot number on the given address and convert that to
- * block number
+ * Get the last slot number on the given address and convert that to block
+ * number
*/
slot = SlotsPerFSMPage - 1;
return fsm_get_heap_blk(addr, slot);
@@ -862,8 +862,8 @@ fsm_update_recursive(Relation rel, FSMAddress addr, uint8 new_cat)
return;
/*
- * Get the parent page and our slot in the parent page, and
- * update the information in that.
+ * Get the parent page and our slot in the parent page, and update the
+ * information in that.
*/
parent = fsm_get_parent(addr, &parentslot);
fsm_set_and_search(rel, parent, parentslot, new_cat, 0);
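Read together, the freespace.c comments above describe a loop that advances one FSM page at a time rather than one heap block at a time. A rough reconstruction of that loop, pieced together from the hunks and simplified (any leaf-level slot update is omitted; fsm_get_location, fsm_update_recursive and fsm_get_lastblckno are freespace.c internals, and this is a sketch, not the verbatim function):

    blockNum = startBlkNum;
    while (blockNum <= endBlkNum)
    {
        /* find the FSM location for this heap block and propagate the new
         * free-space category up through the FSM, as in the hunk above */
        addr = fsm_get_location(blockNum, &slot);
        fsm_update_recursive(rel, addr, new_cat);

        /* every block tracked by this FSM page gets the same category, so
         * jump straight to the first block of the next FSM page */
        lastBlkOnPage = fsm_get_lastblckno(rel, addr);
        if (lastBlkOnPage >= endBlkNum)
            break;
        blockNum = lastBlkOnPage + 1;
    }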
diff --git a/src/backend/storage/ipc/dsm.c b/src/backend/storage/ipc/dsm.c
index cd13a6284c1..47f2bea0be3 100644
--- a/src/backend/storage/ipc/dsm.c
+++ b/src/backend/storage/ipc/dsm.c
@@ -245,8 +245,8 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
}
/*
- * OK, the control segment looks basically valid, so we can use it to
- * get a list of segments that need to be removed.
+ * OK, the control segment looks basically valid, so we can use it to get
+ * a list of segments that need to be removed.
*/
nitems = old_control->nitems;
for (i = 0; i < nitems; ++i)
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index e9de51bdfa0..e5d487dbb74 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -460,7 +460,7 @@ ProcArrayEndTransactionInternal(PGPROC *proc, PGXACT *pgxact,
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- pgxact->delayChkpt = false; /* be sure this is cleared in abort */
+ pgxact->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
/* Clear the subtransaction-XID cache too while holding the lock */
@@ -559,8 +559,8 @@ ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid)
/* Walk the list and clear all XIDs. */
while (nextidx != INVALID_PGPROCNO)
{
- PGPROC *proc = &allProcs[nextidx];
- PGXACT *pgxact = &allPgXact[nextidx];
+ PGPROC *proc = &allProcs[nextidx];
+ PGXACT *pgxact = &allPgXact[nextidx];
ProcArrayEndTransactionInternal(proc, pgxact, proc->procArrayGroupMemberXid);
@@ -580,7 +580,7 @@ ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid)
*/
while (wakeidx != INVALID_PGPROCNO)
{
- PGPROC *proc = &allProcs[wakeidx];
+ PGPROC *proc = &allProcs[wakeidx];
wakeidx = pg_atomic_read_u32(&proc->procArrayGroupNext);
pg_atomic_write_u32(&proc->procArrayGroupNext, INVALID_PGPROCNO);
@@ -642,8 +642,8 @@ ProcArrayInitRecovery(TransactionId initializedUptoXID)
Assert(TransactionIdIsNormal(initializedUptoXID));
/*
- * we set latestObservedXid to the xid SUBTRANS has been initialized up to,
- * so we can extend it from that point onwards in
+ * we set latestObservedXid to the xid SUBTRANS has been initialized up
+ * to, so we can extend it from that point onwards in
* RecordKnownAssignedTransactionIds, and when we get consistent in
* ProcArrayApplyRecoveryInfo().
*/
@@ -2591,8 +2591,8 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
/*
* We ignore an invalid pxmin because this means that backend has
* no snapshot currently. We hold a Share lock to avoid contention
- * with users taking snapshots. That is not a problem because
- * the current xmin is always at least one higher than the latest
+ * with users taking snapshots. That is not a problem because the
+ * current xmin is always at least one higher than the latest
* removed xid, so any new snapshot would never conflict with the
* test here.
*/
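The ProcArrayGroupClearXid() hunks above show the leader's side of group XID clearing: walk the member list, clear each member's XID, then walk it again to wake everyone. For context, a hedged sketch of how a backend joins that list in the first place, by pushing its pgprocno onto an atomic list head with a compare-and-swap; ProcGlobal's procArrayGroupFirst field name is an assumption here, while the other names appear in the hunks:

    /* advertise the XID to be cleared, then push ourselves onto the list */
    proc->procArrayGroupMemberXid = latestXid;
    nextidx = pg_atomic_read_u32(&ProcGlobal->procArrayGroupFirst);
    for (;;)
    {
        pg_atomic_write_u32(&proc->procArrayGroupNext, nextidx);

        if (pg_atomic_compare_exchange_u32(&ProcGlobal->procArrayGroupFirst,
                                           &nextidx,
                                           (uint32) proc->pgprocno))
            break;              /* linked in; a leader will clear our XID */
        /* CAS failed: nextidx now holds the new head, so simply retry */
    }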
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index 03ca79b5e36..5f6226c9bb9 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -1007,8 +1007,8 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait,
static bool
shm_mq_counterparty_gone(volatile shm_mq *mq, BackgroundWorkerHandle *handle)
{
- bool detached;
- pid_t pid;
+ bool detached;
+ pid_t pid;
/* Acquire the lock just long enough to check the pointer. */
SpinLockAcquire(&mq->mq_mutex);
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 762dfa65eb9..7a512b3f81d 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -370,6 +370,7 @@ ResolveRecoveryConflictWithLock(LOCKTAG locktag)
* We're already behind, so clear a path as quickly as possible.
*/
VirtualTransactionId *backends;
+
backends = GetLockConflicts(&locktag, AccessExclusiveLock);
ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_LOCK);
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 41f69306459..dba3809e740 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -1153,13 +1153,13 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
uint32 partition = LockHashPartition(hashcode);
/*
- * It might seem unsafe to access proclock->groupLeader without a lock,
- * but it's not really. Either we are initializing a proclock on our
- * own behalf, in which case our group leader isn't changing because
- * the group leader for a process can only ever be changed by the
- * process itself; or else we are transferring a fast-path lock to the
- * main lock table, in which case that process can't change it's lock
- * group leader without first releasing all of its locks (and in
+ * It might seem unsafe to access proclock->groupLeader without a
+ * lock, but it's not really. Either we are initializing a proclock
+ * on our own behalf, in which case our group leader isn't changing
+ * because the group leader for a process can only ever be changed by
+ * the process itself; or else we are transferring a fast-path lock to
+ * the main lock table, in which case that process can't change it's
+ * lock group leader without first releasing all of its locks (and in
* particular the one we are currently transferring).
*/
proclock->groupLeader = proc->lockGroupLeader != NULL ?
@@ -1319,10 +1319,9 @@ LockCheckConflicts(LockMethod lockMethodTable,
}
/*
- * Rats. Something conflicts. But it could still be my own lock, or
- * a lock held by another member of my locking group. First, figure out
- * how many conflicts remain after subtracting out any locks I hold
- * myself.
+ * Rats. Something conflicts. But it could still be my own lock, or a
+ * lock held by another member of my locking group. First, figure out how
+ * many conflicts remain after subtracting out any locks I hold myself.
*/
myLocks = proclock->holdMask;
for (i = 1; i <= numLockModes; i++)
@@ -1357,9 +1356,10 @@ LockCheckConflicts(LockMethod lockMethodTable,
/*
* Locks held in conflicting modes by members of our own lock group are
* not real conflicts; we can subtract those out and see if we still have
- * a conflict. This is O(N) in the number of processes holding or awaiting
- * locks on this object. We could improve that by making the shared memory
- * state more complex (and larger) but it doesn't seem worth it.
+ * a conflict. This is O(N) in the number of processes holding or
+ * awaiting locks on this object. We could improve that by making the
+ * shared memory state more complex (and larger) but it doesn't seem worth
+ * it.
*/
procLocks = &(lock->procLocks);
otherproclock = (PROCLOCK *)
@@ -1370,7 +1370,7 @@ LockCheckConflicts(LockMethod lockMethodTable,
proclock->groupLeader == otherproclock->groupLeader &&
(otherproclock->holdMask & conflictMask) != 0)
{
- int intersectMask = otherproclock->holdMask & conflictMask;
+ int intersectMask = otherproclock->holdMask & conflictMask;
for (i = 1; i <= numLockModes; i++)
{
@@ -2583,8 +2583,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
*
* proc->databaseId is set at backend startup time and never changes
* thereafter, so it might be safe to perform this test before
- * acquiring &proc->backendLock. In particular, it's certainly safe to
- * assume that if the target backend holds any fast-path locks, it
+ * acquiring &proc->backendLock. In particular, it's certainly safe
+ * to assume that if the target backend holds any fast-path locks, it
* must have performed a memory-fencing operation (in particular, an
* LWLock acquisition) since setting proc->databaseId. However, it's
* less clear that our backend is certain to have performed a memory
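The LockCheckConflicts() comments above describe the group-locking refinement: a conflicting mode only counts if it is held by a proclock outside our own lock group. A simplified sketch of that subtraction step, reusing the names visible in the hunk; the per-mode conflictsRemaining[] counters and totalConflictsRemaining are illustrative stand-ins for the function's real bookkeeping:

    if (proclock != otherproclock &&
        proclock->groupLeader == otherproclock->groupLeader &&
        (otherproclock->holdMask & conflictMask) != 0)
    {
        int     intersectMask = otherproclock->holdMask & conflictMask;

        for (i = 1; i <= numLockModes; i++)
        {
            if ((intersectMask & LOCKBIT_ON(i)) != 0)
            {
                /* held by a member of our own lock group: not a real
                 * conflict, so take it back out of the running totals */
                conflictsRemaining[i]--;
                totalConflictsRemaining--;
            }
        }
    }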
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 25eec9800de..7ffa87d914b 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -208,25 +208,25 @@ PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode)
(errhidestmt(true),
errhidecontext(true),
errmsg_internal("%d: %s(%s): excl %u shared %u haswaiters %u waiters %u rOK %d",
- MyProcPid,
- where, MainLWLockNames[id],
- (state & LW_VAL_EXCLUSIVE) != 0,
- state & LW_SHARED_MASK,
- (state & LW_FLAG_HAS_WAITERS) != 0,
- pg_atomic_read_u32(&lock->nwaiters),
- (state & LW_FLAG_RELEASE_OK) != 0)));
+ MyProcPid,
+ where, MainLWLockNames[id],
+ (state & LW_VAL_EXCLUSIVE) != 0,
+ state & LW_SHARED_MASK,
+ (state & LW_FLAG_HAS_WAITERS) != 0,
+ pg_atomic_read_u32(&lock->nwaiters),
+ (state & LW_FLAG_RELEASE_OK) != 0)));
else
ereport(LOG,
(errhidestmt(true),
errhidecontext(true),
errmsg_internal("%d: %s(%s %d): excl %u shared %u haswaiters %u waiters %u rOK %d",
- MyProcPid,
- where, T_NAME(lock), id,
- (state & LW_VAL_EXCLUSIVE) != 0,
- state & LW_SHARED_MASK,
- (state & LW_FLAG_HAS_WAITERS) != 0,
- pg_atomic_read_u32(&lock->nwaiters),
- (state & LW_FLAG_RELEASE_OK) != 0)));
+ MyProcPid,
+ where, T_NAME(lock), id,
+ (state & LW_VAL_EXCLUSIVE) != 0,
+ state & LW_SHARED_MASK,
+ (state & LW_FLAG_HAS_WAITERS) != 0,
+ pg_atomic_read_u32(&lock->nwaiters),
+ (state & LW_FLAG_RELEASE_OK) != 0)));
}
}
@@ -243,13 +243,13 @@ LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg)
(errhidestmt(true),
errhidecontext(true),
errmsg_internal("%s(%s): %s", where,
- MainLWLockNames[id], msg)));
+ MainLWLockNames[id], msg)));
else
ereport(LOG,
(errhidestmt(true),
errhidecontext(true),
errmsg_internal("%s(%s %d): %s", where,
- T_NAME(lock), id, msg)));
+ T_NAME(lock), id, msg)));
}
}
@@ -760,8 +760,8 @@ GetLWLockIdentifier(uint8 classId, uint16 eventId)
/*
* It is quite possible that user has registered tranche in one of the
- * backends (e.g. by allocating lwlocks in dynamic shared memory) but
- * not all of them, so we can't assume the tranche is registered here.
+ * backends (e.g. by allocating lwlocks in dynamic shared memory) but not
+ * all of them, so we can't assume the tranche is registered here.
*/
if (eventId >= LWLockTranchesAllocated ||
LWLockTrancheArray[eventId]->name == NULL)
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index a66e07b7665..9a758bd9160 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -288,7 +288,7 @@ InitProcGlobal(void)
void
InitProcess(void)
{
- PGPROC * volatile * procgloballist;
+ PGPROC *volatile * procgloballist;
/*
* ProcGlobal should be set up already (if we are a backend, we inherit
@@ -342,8 +342,8 @@ InitProcess(void)
MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
/*
- * Cross-check that the PGPROC is of the type we expect; if this were
- * not the case, it would get returned to the wrong list.
+ * Cross-check that the PGPROC is of the type we expect; if this were not
+ * the case, it would get returned to the wrong list.
*/
Assert(MyProc->procgloballist == procgloballist);
@@ -781,7 +781,7 @@ static void
ProcKill(int code, Datum arg)
{
PGPROC *proc;
- PGPROC * volatile * procgloballist;
+ PGPROC *volatile * procgloballist;
Assert(MyProc != NULL);
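A note on the declaration pgindent reshuffles in InitProcess() and ProcKill(): PGPROC *volatile * procgloballist is an ordinary pointer to a volatile pointer to PGPROC, meaning the list head it points at may change underneath us and must be re-read on every access. A small self-contained illustration of that reading of the type (the struct and function here are stand-ins, not code from proc.c):

    #include <stddef.h>

    typedef struct MyProcSlot
    {
        struct MyProcSlot *next;        /* next free slot on the list */
    } MyProcSlot;

    /* pop one entry off a free list whose head may be changed elsewhere;
     * the caller is assumed to hold whatever lock protects the list */
    static MyProcSlot *
    pop_free_slot(MyProcSlot *volatile *listhead)
    {
        MyProcSlot *slot = *listhead;   /* volatile head: re-read each time */

        if (slot != NULL)
            *listhead = slot->next;     /* unlink it */
        return slot;
    }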