author      Robert Haas    2016-06-09 22:02:36 +0000
committer   Robert Haas    2016-06-09 22:02:36 +0000
commit      4bc424b968058c7f0aa685821d7039e86faac99c (patch)
tree        a4e245ae67bd11edb3926ff5fb3b0223438ac283 /src/backend
parent      9164deea2f4ac90ee5e008ff41fc5ad4423887b2 (diff)
pgindent run for 9.6
Diffstat (limited to 'src/backend')
142 files changed, 1651 insertions, 1591 deletions
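The hunks below are purely mechanical: pgindent reflows block comments to the line-width limit with aligned continuation asterisks, aligns local declaration names with tabs, inserts a space after "if" and after casts, and re-wraps long argument lists. A minimal stand-alone C sketch of those conventions (illustrative only, not taken from the commit):

#include <stdio.h>

/*
 * pgindent reflows block comments to the line-width limit, continuing
 * long lines with an aligned asterisk, as in the reloptions.c and
 * ginfast.c hunks below.
 */
static int
style_example(const int *values, int nvalues)
{
	int			i;			/* declaration names aligned with tabs, */
	int			total = 0;	/* as in the gistkillitems() hunk */

	for (i = 0; i < nvalues; i++)
	{
		/* a space follows "if" and casts: "if (" and "(int) x" */
		if (values[i] > 0)
			total += (int) values[i];
	}
	return total;
}

int
main(void)
{
	int			v[] = {1, -2, 3};

	printf("%d\n", style_example(v, 3));
	return 0;
}

In the real tree the run is driven by the src/tools/pgindent tooling over the whole source tree; the snippet above only mirrors the output style.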
diff --git a/src/backend/access/brin/brin_xlog.c b/src/backend/access/brin/brin_xlog.c index 6f3e37cc37c..27ba0a97f8d 100644 --- a/src/backend/access/brin/brin_xlog.c +++ b/src/backend/access/brin/brin_xlog.c @@ -47,7 +47,7 @@ brin_xlog_insert_update(XLogReaderState *record, { XLogRecPtr lsn = record->EndRecPtr; Buffer buffer; - BlockNumber regpgno; + BlockNumber regpgno; Page page; XLogRedoAction action; diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index cdf074fc985..ba1f3aafed7 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -101,7 +101,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs table pages only to this percentage", RELOPT_KIND_HEAP, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100 }, @@ -110,7 +111,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs btree index pages only to this percentage", RELOPT_KIND_BTREE, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100 }, @@ -119,7 +121,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs hash index pages only to this percentage", RELOPT_KIND_HASH, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100 }, @@ -128,7 +131,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs gist index pages only to this percentage", RELOPT_KIND_GIST, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100 }, @@ -137,7 +141,8 @@ static relopt_int intRelOpts[] = "fillfactor", "Packs spgist index pages only to this percentage", RELOPT_KIND_SPGIST, - ShareUpdateExclusiveLock /* since it applies only to later inserts */ + ShareUpdateExclusiveLock /* since it applies only to later + * inserts */ }, SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100 }, @@ -1475,8 +1480,8 @@ tablespace_reloptions(Datum reloptions, bool validate) LOCKMODE AlterTableGetRelOptionsLockLevel(List *defList) { - LOCKMODE lockmode = NoLock; - ListCell *cell; + LOCKMODE lockmode = NoLock; + ListCell *cell; if (defList == NIL) return AccessExclusiveLock; @@ -1486,8 +1491,8 @@ AlterTableGetRelOptionsLockLevel(List *defList) foreach(cell, defList) { - DefElem *def = (DefElem *) lfirst(cell); - int i; + DefElem *def = (DefElem *) lfirst(cell); + int i; for (i = 0; relOpts[i]; i++) { diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index 5cf737f6213..59a63f28d08 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -524,7 +524,7 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead, int64 nDeletedHeapTuples = 0; ginxlogDeleteListPages data; Buffer buffers[GIN_NDELETE_AT_ONCE]; - BlockNumber freespace[GIN_NDELETE_AT_ONCE]; + BlockNumber freespace[GIN_NDELETE_AT_ONCE]; data.ndeleted = 0; while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead) @@ -745,30 +745,29 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, bool inVacuum = (stats == NULL); /* - * We would like to prevent 
concurrent cleanup process. For - * that we will lock metapage in exclusive mode using LockPage() - * call. Nobody other will use that lock for metapage, so - * we keep possibility of concurrent insertion into pending list + * We would like to prevent concurrent cleanup process. For that we will + * lock metapage in exclusive mode using LockPage() call. Nobody other + * will use that lock for metapage, so we keep possibility of concurrent + * insertion into pending list */ if (inVacuum) { /* - * We are called from [auto]vacuum/analyze or - * gin_clean_pending_list() and we would like to wait - * concurrent cleanup to finish. + * We are called from [auto]vacuum/analyze or gin_clean_pending_list() + * and we would like to wait concurrent cleanup to finish. */ LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock); workMemory = (IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1) ? - autovacuum_work_mem : maintenance_work_mem; + autovacuum_work_mem : maintenance_work_mem; } else { /* - * We are called from regular insert and if we see - * concurrent cleanup just exit in hope that concurrent - * process will clean up pending list. + * We are called from regular insert and if we see concurrent cleanup + * just exit in hope that concurrent process will clean up pending + * list. */ if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock)) return; @@ -829,9 +828,10 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, Assert(!GinPageIsDeleted(page)); /* - * Are we walk through the page which as we remember was a tail when we - * start our cleanup? But if caller asks us to clean up whole pending - * list then ignore old tail, we will work until list becomes empty. + * Are we walk through the page which as we remember was a tail when + * we start our cleanup? But if caller asks us to clean up whole + * pending list then ignore old tail, we will work until list becomes + * empty. */ if (blkno == blknoFinish && full_clean == false) cleanupFinish = true; @@ -917,8 +917,8 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, * locking */ /* - * remove read pages from pending list, at this point all - * content of read pages is in regular structure + * remove read pages from pending list, at this point all content + * of read pages is in regular structure */ shiftList(index, metabuffer, blkno, fill_fsm, stats); @@ -961,9 +961,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean, ReleaseBuffer(metabuffer); /* - * As pending list pages can have a high churn rate, it is - * desirable to recycle them immediately to the FreeSpace Map when - * ordinary backends clean the list. + * As pending list pages can have a high churn rate, it is desirable to + * recycle them immediately to the FreeSpace Map when ordinary backends + * clean the list. 
*/ if (fsm_vac && fill_fsm) IndexFreeSpaceMapVacuum(index); @@ -989,7 +989,7 @@ gin_clean_pending_list(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("recovery is in progress"), - errhint("GIN pending list cannot be cleaned up during recovery."))); + errhint("GIN pending list cannot be cleaned up during recovery."))); /* Must be a GIN index */ if (indexRel->rd_rel->relkind != RELKIND_INDEX || diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 7a9c67aca84..9f784bf48d1 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -281,7 +281,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values, &htup->t_self); /* If we've maxed out our available memory, dump everything to the index */ - if (buildstate->accum.allocatedMemory >= (Size)maintenance_work_mem * 1024L) + if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L) { ItemPointerData *list; Datum key; diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 1934c37534d..c258478f232 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -540,8 +540,10 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, { /* Yes, so initialize stats to zeroes */ stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult)); + /* - * and cleanup any pending inserts */ + * and cleanup any pending inserts + */ ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(), false, stats); } diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index a29088728d2..fdf0c5a5cf4 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1498,8 +1498,9 @@ static void gistvacuumpage(Relation rel, Page page, Buffer buffer) { OffsetNumber deletable[MaxIndexTuplesPerPage]; - int ndeletable = 0; - OffsetNumber offnum, maxoff; + int ndeletable = 0; + OffsetNumber offnum, + maxoff; Assert(GistPageIsLeaf(page)); diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index affd63535ad..5ba7d0a7930 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -36,13 +36,13 @@ static void gistkillitems(IndexScanDesc scan) { - GISTScanOpaque so = (GISTScanOpaque) scan->opaque; - Buffer buffer; - Page page; - OffsetNumber offnum; - ItemId iid; - int i; - bool killedsomething = false; + GISTScanOpaque so = (GISTScanOpaque) scan->opaque; + Buffer buffer; + Page page; + OffsetNumber offnum; + ItemId iid; + int i; + bool killedsomething = false; Assert(so->curBlkno != InvalidBlockNumber); Assert(!XLogRecPtrIsInvalid(so->curPageLSN)); @@ -57,21 +57,22 @@ gistkillitems(IndexScanDesc scan) page = BufferGetPage(buffer); /* - * If page LSN differs it means that the page was modified since the last read. - * killedItems could be not valid so LP_DEAD hints applying is not safe. + * If page LSN differs it means that the page was modified since the last + * read. killedItems could be not valid so LP_DEAD hints applying is not + * safe. */ - if(PageGetLSN(page) != so->curPageLSN) + if (PageGetLSN(page) != so->curPageLSN) { UnlockReleaseBuffer(buffer); - so->numKilled = 0; /* reset counter */ + so->numKilled = 0; /* reset counter */ return; } Assert(GistPageIsLeaf(page)); /* - * Mark all killedItems as dead. We need no additional recheck, - * because, if page was modified, pageLSN must have changed. + * Mark all killedItems as dead. 
We need no additional recheck, because, + * if page was modified, pageLSN must have changed. */ for (i = 0; i < so->numKilled; i++) { @@ -390,7 +391,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances, maxoff = PageGetMaxOffsetNumber(page); for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { - ItemId iid = PageGetItemId(page, i); + ItemId iid = PageGetItemId(page, i); IndexTuple it; bool match; bool recheck; @@ -400,10 +401,11 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances, * If the scan specifies not to return killed tuples, then we treat a * killed tuple as not passing the qual. */ - if(scan->ignore_killed_tuples && ItemIdIsDead(iid)) + if (scan->ignore_killed_tuples && ItemIdIsDead(iid)) continue; it = (IndexTuple) PageGetItem(page, iid); + /* * Must call gistindex_keytest in tempCxt, and clean up any leftover * junk afterward. @@ -665,11 +667,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage - * sizeof(OffsetNumber)); + * sizeof(OffsetNumber)); MemoryContextSwitchTo(oldCxt); } @@ -702,11 +704,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir) if (so->killedItems == NULL) { MemoryContext oldCxt = - MemoryContextSwitchTo(so->giststate->scanCxt); + MemoryContextSwitchTo(so->giststate->scanCxt); so->killedItems = (OffsetNumber *) palloc(MaxIndexTuplesPerPage - * sizeof(OffsetNumber)); + * sizeof(OffsetNumber)); MemoryContextSwitchTo(oldCxt); } diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index 328e54b85ed..6f07cd8d46e 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -230,8 +230,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys, ScanKey skey = scan->keyData + i; /* - * Copy consistent support function to ScanKey structure - * instead of function implementing filtering operator. + * Copy consistent support function to ScanKey structure instead + * of function implementing filtering operator. */ fmgr_info_copy(&(skey->sk_func), &(so->giststate->consistentFn[skey->sk_attno - 1]), @@ -303,8 +303,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys, so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid); /* - * Copy distance support function to ScanKey structure - * instead of function implementing ordering operator. + * Copy distance support function to ScanKey structure instead of + * function implementing ordering operator. 
*/ fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt); diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 4041f9cc5a6..6db62410979 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -1687,7 +1687,7 @@ heap_parallelscan_nextpage(HeapScanDesc scan) { BlockNumber page = InvalidBlockNumber; BlockNumber sync_startpage = InvalidBlockNumber; - BlockNumber report_page = InvalidBlockNumber; + BlockNumber report_page = InvalidBlockNumber; ParallelHeapScanDesc parallel_scan; Assert(scan->rs_parallel); diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index 6b850920c7c..c90fb71965c 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -178,7 +178,7 @@ static void RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) { Page page; - BlockNumber blockNum = InvalidBlockNumber, + BlockNumber blockNum = InvalidBlockNumber, firstBlock = InvalidBlockNumber; int extraBlocks = 0; int lockWaiters = 0; @@ -191,10 +191,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) return; /* - * It might seem like multiplying the number of lock waiters by as much - * as 20 is too aggressive, but benchmarking revealed that smaller numbers - * were insufficient. 512 is just an arbitrary cap to prevent pathological - * results. + * It might seem like multiplying the number of lock waiters by as much as + * 20 is too aggressive, but benchmarking revealed that smaller numbers + * were insufficient. 512 is just an arbitrary cap to prevent + * pathological results. */ extraBlocks = Min(512, lockWaiters * 20); @@ -225,10 +225,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate) } /* - * Updating the upper levels of the free space map is too expensive - * to do for every block, but it's worth doing once at the end to make - * sure that subsequent insertion activity sees all of those nifty free - * pages we just inserted. + * Updating the upper levels of the free space map is too expensive to do + * for every block, but it's worth doing once at the end to make sure that + * subsequent insertion activity sees all of those nifty free pages we + * just inserted. * * Note that we're using the freespace value that was reported for the * last block we added as if it were the freespace value for every block @@ -547,8 +547,8 @@ loop: } /* - * In addition to whatever extension we performed above, we always add - * at least one block to satisfy our own request. + * In addition to whatever extension we performed above, we always add at + * least one block to satisfy our own request. * * XXX This does an lseek - rather expensive - but at the moment it is the * only way to accurately determine how many blocks are in a relation. 
Is diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index eb7ae8f2264..6ff92516eda 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -105,8 +105,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer) OldestXmin = RecentGlobalXmin; else OldestXmin = - TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin, - relation); + TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin, + relation); Assert(TransactionIdIsValid(OldestXmin)); diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 05422f10799..b472d31a03c 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -272,7 +272,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk); uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk); Page page; - uint8 *map; + uint8 *map; #ifdef TRACE_VISIBILITYMAP elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk); @@ -291,7 +291,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, elog(ERROR, "wrong VM buffer passed to visibilitymap_set"); page = BufferGetPage(vmBuf); - map = (uint8 *)PageGetContents(page); + map = (uint8 *) PageGetContents(page); LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE); if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS)) diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index ecc43e51c92..ef69290b6c9 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -395,7 +395,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * Check for a conflict-in as we would if we were going to * write to this page. We aren't actually going to write, * but we want a chance to report SSI conflicts that would - * otherwise be masked by this unique constraint violation. + * otherwise be masked by this unique constraint + * violation. */ CheckForSerializableConflictIn(rel, NULL, buf); diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 3a7942997c2..1f479735c20 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -813,8 +813,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, /* * Check to see if we need to issue one final WAL record for this index, - * which may be needed for correctness on a hot standby node when - * non-MVCC index scans could take place. + * which may be needed for correctness on a hot standby node when non-MVCC + * index scans could take place. * * If the WAL is replayed in hot standby, the replay process needs to get * cleanup locks on all index leaf pages, just as we've been doing here. @@ -1025,13 +1025,13 @@ restart: if (ndeletable > 0) { /* - * Notice that the issued XLOG_BTREE_VACUUM WAL record includes all - * information to the replay code to allow it to get a cleanup lock - * on all pages between the previous lastBlockVacuumed and this page. - * This ensures that WAL replay locks all leaf pages at some point, - * which is important should non-MVCC scans be requested. - * This is currently unused on standby, but we record it anyway, so - * that the WAL contains the required information. + * Notice that the issued XLOG_BTREE_VACUUM WAL record includes + * all information to the replay code to allow it to get a cleanup + * lock on all pages between the previous lastBlockVacuumed and + * this page. 
This ensures that WAL replay locks all leaf pages at + * some point, which is important should non-MVCC scans be + * requested. This is currently unused on standby, but we record + * it anyway, so that the WAL contains the required information. * * Since we can visit leaf pages out-of-order when recursing, * replay might end up locking such pages an extra time, but it diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index f8691bbc44a..c536e224321 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -392,15 +392,15 @@ btree_xlog_vacuum(XLogReaderState *record) xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record); /* - * This section of code is thought to be no longer needed, after - * analysis of the calling paths. It is retained to allow the code - * to be reinstated if a flaw is revealed in that thinking. + * This section of code is thought to be no longer needed, after analysis + * of the calling paths. It is retained to allow the code to be reinstated + * if a flaw is revealed in that thinking. * * If we are running non-MVCC scans using this index we need to do some * additional work to ensure correctness, which is known as a "pin scan" * described in more detail in next paragraphs. We used to do the extra - * work in all cases, whereas we now avoid that work in most cases. - * If lastBlockVacuumed is set to InvalidBlockNumber then we skip the + * work in all cases, whereas we now avoid that work in most cases. If + * lastBlockVacuumed is set to InvalidBlockNumber then we skip the * additional work required for the pin scan. * * Avoiding this extra work is important since it requires us to touch diff --git a/src/backend/access/rmgrdesc/genericdesc.c b/src/backend/access/rmgrdesc/genericdesc.c index 0796bb87414..22f81570a54 100644 --- a/src/backend/access/rmgrdesc/genericdesc.c +++ b/src/backend/access/rmgrdesc/genericdesc.c @@ -29,8 +29,8 @@ generic_desc(StringInfo buf, XLogReaderState *record) while (ptr < end) { - OffsetNumber offset, - length; + OffsetNumber offset, + length; memcpy(&offset, ptr, sizeof(offset)); ptr += sizeof(offset); diff --git a/src/backend/access/rmgrdesc/logicalmsgdesc.c b/src/backend/access/rmgrdesc/logicalmsgdesc.c index b194e1424d8..525826efd3f 100644 --- a/src/backend/access/rmgrdesc/logicalmsgdesc.c +++ b/src/backend/access/rmgrdesc/logicalmsgdesc.c @@ -26,7 +26,7 @@ logicalmsg_desc(StringInfo buf, XLogReaderState *record) xl_logical_message *xlrec = (xl_logical_message *) rec; appendStringInfo(buf, "%s message size %zu bytes", - xlrec->transactional ? "transactional" : "nontransactional", + xlrec->transactional ? 
"transactional" : "nontransactional", xlrec->message_size); } } diff --git a/src/backend/access/rmgrdesc/standbydesc.c b/src/backend/access/rmgrdesc/standbydesc.c index e6172ccdf73..13797a3d2f4 100644 --- a/src/backend/access/rmgrdesc/standbydesc.c +++ b/src/backend/access/rmgrdesc/standbydesc.c @@ -100,7 +100,7 @@ standby_desc_invalidations(StringInfo buf, Oid dbId, Oid tsId, bool relcacheInitFileInval) { - int i; + int i; if (relcacheInitFileInval) appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u", diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c index 6f07c5cfaac..91d27d0654e 100644 --- a/src/backend/access/rmgrdesc/xactdesc.c +++ b/src/backend/access/rmgrdesc/xactdesc.c @@ -205,8 +205,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId if (parsed.nmsgs > 0) { standby_desc_invalidations( - buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId, - XactCompletionRelcacheInitFileInval(parsed.xinfo)); + buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId, + XactCompletionRelcacheInitFileInval(parsed.xinfo)); } if (XactCompletionForceSyncCommit(parsed.xinfo)) diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c index 022bd44eff2..62ed1dc04b3 100644 --- a/src/backend/access/rmgrdesc/xlogdesc.c +++ b/src/backend/access/rmgrdesc/xlogdesc.c @@ -26,8 +26,8 @@ const struct config_enum_entry wal_level_options[] = { {"minimal", WAL_LEVEL_MINIMAL, false}, {"replica", WAL_LEVEL_REPLICA, false}, - {"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */ - {"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */ + {"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */ + {"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */ {"logical", WAL_LEVEL_LOGICAL, false}, {NULL, 0, false} }; diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index 17134396a4b..e330105217d 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -92,7 +92,7 @@ typedef struct CommitTimestampShared { TransactionId xidLastCommit; CommitTimestampEntry dataLastCommit; - bool commitTsActive; + bool commitTsActive; } CommitTimestampShared; CommitTimestampShared *commitTsShared; @@ -153,9 +153,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids, * No-op if the module is not active. * * An unlocked read here is fine, because in a standby (the only place - * where the flag can change in flight) this routine is only called by - * the recovery process, which is also the only process which can change - * the flag. + * where the flag can change in flight) this routine is only called by the + * recovery process, which is also the only process which can change the + * flag. */ if (!commitTsShared->commitTsActive) return; @@ -767,8 +767,8 @@ ExtendCommitTs(TransactionId newestXact) int pageno; /* - * Nothing to do if module not enabled. Note we do an unlocked read of the - * flag here, which is okay because this routine is only called from + * Nothing to do if module not enabled. Note we do an unlocked read of + * the flag here, which is okay because this routine is only called from * GetNewTransactionId, which is never called in a standby. 
*/ Assert(!InRecovery); @@ -855,7 +855,7 @@ AdvanceOldestCommitTsXid(TransactionId oldestXact) { LWLockAcquire(CommitTsLock, LW_EXCLUSIVE); if (ShmemVariableCache->oldestCommitTsXid != InvalidTransactionId && - TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact)) + TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact)) ShmemVariableCache->oldestCommitTsXid = oldestXact; LWLockRelease(CommitTsLock); } diff --git a/src/backend/access/transam/generic_xlog.c b/src/backend/access/transam/generic_xlog.c index c33e7beb6a4..1926d98de00 100644 --- a/src/backend/access/transam/generic_xlog.c +++ b/src/backend/access/transam/generic_xlog.c @@ -52,9 +52,8 @@ typedef struct Buffer buffer; /* registered buffer */ int flags; /* flags for this buffer */ int deltaLen; /* space consumed in delta field */ - char *image; /* copy of page image for modification, - * do not do it in-place to have aligned - * memory chunk */ + char *image; /* copy of page image for modification, do not + * do it in-place to have aligned memory chunk */ char delta[MAX_DELTA_SIZE]; /* delta between page images */ } PageData; diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index a677af00496..7bccca8a17b 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -988,8 +988,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) char *oldest_datname = get_database_name(oldest_datoid); /* - * Immediately kick autovacuum into action as we're already - * in ERROR territory. + * Immediately kick autovacuum into action as we're already in + * ERROR territory. */ SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER); @@ -1134,8 +1134,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used", "database with OID %u must be vacuumed before %d more multixact members are used", - MultiXactState->offsetStopLimit - nextOffset + nmembers, - MultiXactState->oldestMultiXactDB, + MultiXactState->offsetStopLimit - nextOffset + nmembers, + MultiXactState->oldestMultiXactDB, MultiXactState->offsetStopLimit - nextOffset + nmembers), errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings."))); diff --git a/src/backend/access/transam/parallel.c b/src/backend/access/transam/parallel.c index 934dba88c66..74a483e0fd9 100644 --- a/src/backend/access/transam/parallel.c +++ b/src/backend/access/transam/parallel.c @@ -134,9 +134,9 @@ CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers) nworkers = 0; /* - * If we are running under serializable isolation, we can't use - * parallel workers, at least not until somebody enhances that mechanism - * to be parallel-aware. + * If we are running under serializable isolation, we can't use parallel + * workers, at least not until somebody enhances that mechanism to be + * parallel-aware. */ if (IsolationIsSerializable()) nworkers = 0; @@ -646,9 +646,9 @@ DestroyParallelContext(ParallelContext *pcxt) } /* - * We can't finish transaction commit or abort until all of the - * workers have exited. This means, in particular, that we can't respond - * to interrupts at this stage. + * We can't finish transaction commit or abort until all of the workers + * have exited. This means, in particular, that we can't respond to + * interrupts at this stage. 
*/ HOLD_INTERRUPTS(); WaitForParallelWorkersToExit(pcxt); @@ -918,7 +918,7 @@ ParallelWorkerMain(Datum main_arg) if (toc == NULL) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("invalid magic number in dynamic shared memory segment"))); + errmsg("invalid magic number in dynamic shared memory segment"))); /* Look up fixed parallel state. */ fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED); @@ -958,9 +958,9 @@ ParallelWorkerMain(Datum main_arg) */ /* - * Join locking group. We must do this before anything that could try - * to acquire a heavyweight lock, because any heavyweight locks acquired - * to this point could block either directly against the parallel group + * Join locking group. We must do this before anything that could try to + * acquire a heavyweight lock, because any heavyweight locks acquired to + * this point could block either directly against the parallel group * leader or against some process which in turn waits for a lock that * conflicts with the parallel group leader, causing an undetected * deadlock. (If we can't join the lock group, the leader has gone away, diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index 36a011cc94e..bbae5847f2d 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -152,7 +152,7 @@ SimpleLruShmemSize(int nslots, int nlsns) sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */ sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */ sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */ - sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */ + sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */ if (nlsns > 0) sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */ @@ -224,7 +224,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, for (slotno = 0; slotno < nslots; slotno++) { LWLockInitialize(&shared->buffer_locks[slotno].lock, - shared->lwlock_tranche_id); + shared->lwlock_tranche_id); shared->page_buffer[slotno] = ptr; shared->page_status[slotno] = SLRU_PAGE_EMPTY; diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index c02046c0730..908fe2d5331 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -257,7 +257,7 @@ StartupSUBTRANS(TransactionId oldestActiveXID) startPage++; /* must account for wraparound */ if (startPage > TransactionIdToPage(MaxTransactionId)) - startPage=0; + startPage = 0; } (void) ZeroSUBTRANSPage(startPage); diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index a65048b683b..06aedd40bf1 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -140,13 +140,13 @@ typedef struct GlobalTransactionData TimestampTz prepared_at; /* time of preparation */ /* - * Note that we need to keep track of two LSNs for each GXACT. - * We keep track of the start LSN because this is the address we must - * use to read state data back from WAL when committing a prepared GXACT. - * We keep track of the end LSN because that is the LSN we need to wait - * for prior to commit. + * Note that we need to keep track of two LSNs for each GXACT. We keep + * track of the start LSN because this is the address we must use to read + * state data back from WAL when committing a prepared GXACT. We keep + * track of the end LSN because that is the LSN we need to wait for prior + * to commit. 
*/ - XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */ + XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */ XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */ Oid owner; /* ID of user that executed the xact */ @@ -980,7 +980,7 @@ StartPrepare(GlobalTransaction gxact) hdr.nabortrels = smgrGetPendingDeletes(false, &abortrels); hdr.ninvalmsgs = xactGetCommittedInvalidationMessages(&invalmsgs, &hdr.initfileinval); - hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */ + hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */ save_state_data(&hdr, sizeof(TwoPhaseFileHeader)); save_state_data(gxact->gid, hdr.gidlen); @@ -1259,28 +1259,28 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("Failed while allocating an XLog reading processor."))); + errdetail("Failed while allocating an XLog reading processor."))); record = XLogReadRecord(xlogreader, lsn, &errormsg); if (record == NULL) ereport(ERROR, (errcode_for_file_access(), errmsg("could not read two-phase state from xlog at %X/%X", - (uint32) (lsn >> 32), - (uint32) lsn))); + (uint32) (lsn >> 32), + (uint32) lsn))); if (XLogRecGetRmid(xlogreader) != RM_XACT_ID || (XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE) ereport(ERROR, (errcode_for_file_access(), errmsg("expected two-phase state data is not present in xlog at %X/%X", - (uint32) (lsn >> 32), - (uint32) lsn))); + (uint32) (lsn >> 32), + (uint32) lsn))); if (len != NULL) *len = XLogRecGetDataLen(xlogreader); - *buf = palloc(sizeof(char)*XLogRecGetDataLen(xlogreader)); + *buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader)); memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader)); XLogReaderFree(xlogreader); @@ -1347,10 +1347,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit) xid = pgxact->xid; /* - * Read and validate 2PC state data. - * State data will typically be stored in WAL files if the LSN is after the - * last checkpoint record, or moved to disk if for some reason they have - * lived for a long time. + * Read and validate 2PC state data. State data will typically be stored + * in WAL files if the LSN is after the last checkpoint record, or moved + * to disk if for some reason they have lived for a long time. */ if (gxact->ondisk) buf = ReadTwoPhaseFile(xid, true); @@ -1605,22 +1604,20 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START(); /* - * We are expecting there to be zero GXACTs that need to be - * copied to disk, so we perform all I/O while holding - * TwoPhaseStateLock for simplicity. This prevents any new xacts - * from preparing while this occurs, which shouldn't be a problem - * since the presence of long-lived prepared xacts indicates the - * transaction manager isn't active. + * We are expecting there to be zero GXACTs that need to be copied to + * disk, so we perform all I/O while holding TwoPhaseStateLock for + * simplicity. This prevents any new xacts from preparing while this + * occurs, which shouldn't be a problem since the presence of long-lived + * prepared xacts indicates the transaction manager isn't active. * - * It's also possible to move I/O out of the lock, but on - * every error we should check whether somebody committed our - * transaction in different backend. Let's leave this optimisation - * for future, if somebody will spot that this place cause - * bottleneck. 
+ * It's also possible to move I/O out of the lock, but on every error we + * should check whether somebody committed our transaction in different + * backend. Let's leave this optimisation for future, if somebody will + * spot that this place cause bottleneck. * - * Note that it isn't possible for there to be a GXACT with - * a prepare_end_lsn set prior to the last checkpoint yet - * is marked invalid, because of the efforts with delayChkpt. + * Note that it isn't possible for there to be a GXACT with a + * prepare_end_lsn set prior to the last checkpoint yet is marked invalid, + * because of the efforts with delayChkpt. */ LWLockAcquire(TwoPhaseStateLock, LW_SHARED); for (i = 0; i < TwoPhaseState->numPrepXacts; i++) @@ -1633,7 +1630,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) gxact->prepare_end_lsn <= redo_horizon) { char *buf; - int len; + int len; XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len); RecreateTwoPhaseFile(pgxact->xid, buf, len); @@ -1920,7 +1917,7 @@ RecoverPreparedTransactions(void) TwoPhaseFileHeader *hdr; TransactionId *subxids; GlobalTransaction gxact; - const char *gid; + const char *gid; int i; xid = (TransactionId) strtoul(clde->d_name, NULL, 16); diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 95690ff36cb..23f36ead7e5 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -1166,19 +1166,19 @@ RecordTransactionCommit(void) /* * Transactions without an assigned xid can contain invalidation * messages (e.g. explicit relcache invalidations or catcache - * invalidations for inplace updates); standbys need to process - * those. We can't emit a commit record without an xid, and we don't - * want to force assigning an xid, because that'd be problematic for - * e.g. vacuum. Hence we emit a bespoke record for the - * invalidations. We don't want to use that in case a commit record is - * emitted, so they happen synchronously with commits (besides not - * wanting to emit more WAL recoreds). + * invalidations for inplace updates); standbys need to process those. + * We can't emit a commit record without an xid, and we don't want to + * force assigning an xid, because that'd be problematic for e.g. + * vacuum. Hence we emit a bespoke record for the invalidations. We + * don't want to use that in case a commit record is emitted, so they + * happen synchronously with commits (besides not wanting to emit more + * WAL recoreds). */ if (nmsgs != 0) { LogStandbyInvalidations(nmsgs, invalMessages, RelcacheInitFileInval); - wrote_xlog = true; /* not strictly necessary */ + wrote_xlog = true; /* not strictly necessary */ } /* @@ -1272,8 +1272,8 @@ RecordTransactionCommit(void) * this case, but we don't currently try to do that. It would certainly * cause problems at least in Hot Standby mode, where the * KnownAssignedXids machinery requires tracking every XID assignment. It - * might be OK to skip it only when wal_level < replica, but for now - * we don't.) + * might be OK to skip it only when wal_level < replica, but for now we + * don't.) * * However, if we're doing cleanup of any non-temp rels or committing any * command that wanted to force sync commit, then we must flush XLOG @@ -5486,8 +5486,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed, /* * If asked by the primary (because someone is waiting for a synchronous - * commit = remote_apply), we will need to ask walreceiver to send a - * reply immediately. 
+ * commit = remote_apply), we will need to ask walreceiver to send a reply + * immediately. */ if (XactCompletionApplyFeedback(parsed->xinfo)) XLogRequestWalReceiverReply(); diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index b473f1914e8..e4645a31691 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -5004,9 +5004,9 @@ readRecoveryCommandFile(void) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid value for recovery parameter \"%s\": \"%s\"", - "recovery_target_action", - item->value), + errmsg("invalid value for recovery parameter \"%s\": \"%s\"", + "recovery_target_action", + item->value), errhint("Valid values are \"pause\", \"promote\", and \"shutdown\"."))); ereport(DEBUG2, @@ -5087,9 +5087,9 @@ readRecoveryCommandFile(void) else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid value for recovery parameter \"%s\": \"%s\"", - "recovery_target", - item->value), + errmsg("invalid value for recovery parameter \"%s\": \"%s\"", + "recovery_target", + item->value), errhint("The only allowed value is \"immediate\"."))); ereport(DEBUG2, (errmsg_internal("recovery_target = '%s'", @@ -5880,8 +5880,8 @@ CheckRequiredParameterValues(void) } /* - * For Hot Standby, the WAL must be generated with 'replica' mode, and - * we must have at least as many backend slots as the primary. + * For Hot Standby, the WAL must be generated with 'replica' mode, and we + * must have at least as many backend slots as the primary. */ if (ArchiveRecoveryRequested && EnableHotStandby) { @@ -6163,26 +6163,26 @@ StartupXLOG(void) * is no use of such file. There is no harm in retaining it, but it * is better to get rid of the map file so that we don't have any * redundant file in data directory and it will avoid any sort of - * confusion. It seems prudent though to just rename the file out - * of the way rather than delete it completely, also we ignore any - * error that occurs in rename operation as even if map file is - * present without backup_label file, it is harmless. + * confusion. It seems prudent though to just rename the file out of + * the way rather than delete it completely, also we ignore any error + * that occurs in rename operation as even if map file is present + * without backup_label file, it is harmless. */ if (stat(TABLESPACE_MAP, &st) == 0) { unlink(TABLESPACE_MAP_OLD); if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0) ereport(LOG, - (errmsg("ignoring file \"%s\" because no file \"%s\" exists", - TABLESPACE_MAP, BACKUP_LABEL_FILE), - errdetail("File \"%s\" was renamed to \"%s\".", - TABLESPACE_MAP, TABLESPACE_MAP_OLD))); + (errmsg("ignoring file \"%s\" because no file \"%s\" exists", + TABLESPACE_MAP, BACKUP_LABEL_FILE), + errdetail("File \"%s\" was renamed to \"%s\".", + TABLESPACE_MAP, TABLESPACE_MAP_OLD))); else ereport(LOG, - (errmsg("ignoring file \"%s\" because no file \"%s\" exists", - TABLESPACE_MAP, BACKUP_LABEL_FILE), - errdetail("Could not rename file \"%s\" to \"%s\": %m.", - TABLESPACE_MAP, TABLESPACE_MAP_OLD))); + (errmsg("ignoring file \"%s\" because no file \"%s\" exists", + TABLESPACE_MAP, BACKUP_LABEL_FILE), + errdetail("Could not rename file \"%s\" to \"%s\": %m.", + TABLESPACE_MAP, TABLESPACE_MAP_OLD))); } /* @@ -6314,24 +6314,24 @@ StartupXLOG(void) ereport(DEBUG1, (errmsg_internal("redo record is at %X/%X; shutdown %s", (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo, - wasShutdown ? 
"TRUE" : "FALSE"))); + wasShutdown ? "TRUE" : "FALSE"))); ereport(DEBUG1, (errmsg_internal("next transaction ID: %u:%u; next OID: %u", - checkPoint.nextXidEpoch, checkPoint.nextXid, - checkPoint.nextOid))); + checkPoint.nextXidEpoch, checkPoint.nextXid, + checkPoint.nextOid))); ereport(DEBUG1, (errmsg_internal("next MultiXactId: %u; next MultiXactOffset: %u", - checkPoint.nextMulti, checkPoint.nextMultiOffset))); + checkPoint.nextMulti, checkPoint.nextMultiOffset))); ereport(DEBUG1, - (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u", - checkPoint.oldestXid, checkPoint.oldestXidDB))); + (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u", + checkPoint.oldestXid, checkPoint.oldestXidDB))); ereport(DEBUG1, (errmsg_internal("oldest MultiXactId: %u, in database %u", - checkPoint.oldestMulti, checkPoint.oldestMultiDB))); + checkPoint.oldestMulti, checkPoint.oldestMultiDB))); ereport(DEBUG1, (errmsg_internal("commit timestamp Xid oldest/newest: %u/%u", - checkPoint.oldestCommitTsXid, - checkPoint.newestCommitTsXid))); + checkPoint.oldestCommitTsXid, + checkPoint.newestCommitTsXid))); if (!TransactionIdIsNormal(checkPoint.nextXid)) ereport(PANIC, (errmsg("invalid next transaction ID"))); @@ -6883,8 +6883,8 @@ StartupXLOG(void) SpinLockRelease(&XLogCtl->info_lck); /* - * If rm_redo called XLogRequestWalReceiverReply, then we - * wake up the receiver so that it notices the updated + * If rm_redo called XLogRequestWalReceiverReply, then we wake + * up the receiver so that it notices the updated * lastReplayedEndRecPtr and sends a reply to the master. */ if (doRequestWalReceiverReply) diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index de493fad7a4..33383b4dccb 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -104,8 +104,8 @@ pg_start_backup(PG_FUNCTION_ARGS) MemoryContext oldcontext; /* - * Label file and tablespace map file need to be long-lived, since they - * are read in pg_stop_backup. + * Label file and tablespace map file need to be long-lived, since + * they are read in pg_stop_backup. */ oldcontext = MemoryContextSwitchTo(TopMemoryContext); label_file = makeStringInfo(); @@ -113,7 +113,7 @@ pg_start_backup(PG_FUNCTION_ARGS) MemoryContextSwitchTo(oldcontext); startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file, - dir, NULL, tblspc_map_file, false, true); + dir, NULL, tblspc_map_file, false, true); nonexclusive_backup_running = true; before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0); @@ -138,8 +138,8 @@ pg_start_backup(PG_FUNCTION_ARGS) * Note: different from CancelBackup which just cancels online backup mode. * * Note: this version is only called to stop an exclusive backup. The function - * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to - * stop non-exclusive backups. + * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to + * stop non-exclusive backups. * * Permission checking for this function is managed through the normal * GRANT system. @@ -156,10 +156,10 @@ pg_stop_backup(PG_FUNCTION_ARGS) errhint("Did you mean to use pg_stop_backup('f')?"))); /* - * Exclusive backups were typically started in a different connection, - * so don't try to verify that exclusive_backup_running is set in this one. - * Actual verification that an exclusive backup is in fact running is handled - * inside do_pg_stop_backup. 
+ * Exclusive backups were typically started in a different connection, so + * don't try to verify that exclusive_backup_running is set in this one. + * Actual verification that an exclusive backup is in fact running is + * handled inside do_pg_stop_backup. */ stoppoint = do_pg_stop_backup(NULL, true, NULL); @@ -182,16 +182,16 @@ pg_stop_backup(PG_FUNCTION_ARGS) Datum pg_stop_backup_v2(PG_FUNCTION_ARGS) { - ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - TupleDesc tupdesc; + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + TupleDesc tupdesc; Tuplestorestate *tupstore; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - Datum values[3]; - bool nulls[3]; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + Datum values[3]; + bool nulls[3]; - bool exclusive = PG_GETARG_BOOL(0); - XLogRecPtr stoppoint; + bool exclusive = PG_GETARG_BOOL(0); + XLogRecPtr stoppoint; /* check to see if caller supports us returning a tuplestore */ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) @@ -248,9 +248,8 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS) errhint("Did you mean to use pg_stop_backup('t')?"))); /* - * Stop the non-exclusive backup. Return a copy of the backup - * label and tablespace map so they can be written to disk by - * the caller. + * Stop the non-exclusive backup. Return a copy of the backup label + * and tablespace map so they can be written to disk by the caller. */ stoppoint = do_pg_stop_backup(label_file->data, true, NULL); nonexclusive_backup_running = false; @@ -269,7 +268,7 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS) } /* Stoppoint is included on both exclusive and nonexclusive backups */ - values[0] = LSNGetDatum(stoppoint); + values[0] = LSNGetDatum(stoppoint); tuplestore_putvalues(tupstore, tupdesc, values, nulls); tuplestore_donestoring(typstore); diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index c3aecc75746..dcf747c6334 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -322,7 +322,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg) if (total_len < SizeOfXLogRecord) { report_invalid_record(state, - "invalid record length at %X/%X: wanted %u, got %u", + "invalid record length at %X/%X: wanted %u, got %u", (uint32) (RecPtr >> 32), (uint32) RecPtr, (uint32) SizeOfXLogRecord, total_len); goto err; @@ -621,7 +621,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr, if (record->xl_tot_len < SizeOfXLogRecord) { report_invalid_record(state, - "invalid record length at %X/%X: wanted %u, got %u", + "invalid record length at %X/%X: wanted %u, got %u", (uint32) (RecPtr >> 32), (uint32) RecPtr, (uint32) SizeOfXLogRecord, record->xl_tot_len); return false; diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 13244610db1..8068b82eab8 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -1792,7 +1792,7 @@ get_object_address_defacl(List *objname, List *objargs, bool missing_ok) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("unrecognized default ACL object type %c", objtype), - errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\"."))); + errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\"."))); } /* diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index bcc941104f5..73d19ec3947 100644 --- a/src/backend/catalog/pg_aggregate.c +++ 
b/src/backend/catalog/pg_aggregate.c @@ -82,9 +82,9 @@ AggregateCreate(const char *aggName, Form_pg_proc proc; Oid transfn; Oid finalfn = InvalidOid; /* can be omitted */ - Oid combinefn = InvalidOid; /* can be omitted */ + Oid combinefn = InvalidOid; /* can be omitted */ Oid serialfn = InvalidOid; /* can be omitted */ - Oid deserialfn = InvalidOid; /* can be omitted */ + Oid deserialfn = InvalidOid; /* can be omitted */ Oid mtransfn = InvalidOid; /* can be omitted */ Oid minvtransfn = InvalidOid; /* can be omitted */ Oid mfinalfn = InvalidOid; /* can be omitted */ @@ -407,11 +407,11 @@ AggregateCreate(const char *aggName, /* handle the combinefn, if supplied */ if (aggcombinefnName) { - Oid combineType; + Oid combineType; /* - * Combine function must have 2 argument, each of which is the - * trans type + * Combine function must have 2 argument, each of which is the trans + * type */ fnArgs[0] = aggTransType; fnArgs[1] = aggTransType; @@ -423,9 +423,9 @@ AggregateCreate(const char *aggName, if (combineType != aggTransType) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("return type of combine function %s is not %s", - NameListToString(aggcombinefnName), - format_type_be(aggTransType)))); + errmsg("return type of combine function %s is not %s", + NameListToString(aggcombinefnName), + format_type_be(aggTransType)))); /* * A combine function to combine INTERNAL states must accept nulls and @@ -440,8 +440,9 @@ AggregateCreate(const char *aggName, } /* - * Validate the serialization function, if present. We must ensure that the - * return type of this function is the same as the specified serialType. + * Validate the serialization function, if present. We must ensure that + * the return type of this function is the same as the specified + * serialType. */ if (aggserialfnName) { @@ -454,9 +455,9 @@ AggregateCreate(const char *aggName, if (rettype != aggSerialType) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("return type of serialization function %s is not %s", - NameListToString(aggserialfnName), - format_type_be(aggSerialType)))); + errmsg("return type of serialization function %s is not %s", + NameListToString(aggserialfnName), + format_type_be(aggSerialType)))); } /* @@ -474,9 +475,9 @@ AggregateCreate(const char *aggName, if (rettype != aggTransType) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("return type of deserialization function %s is not %s", - NameListToString(aggdeserialfnName), - format_type_be(aggTransType)))); + errmsg("return type of deserialization function %s is not %s", + NameListToString(aggdeserialfnName), + format_type_be(aggTransType)))); } /* diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c index 7f45ba94070..f1fdc1a3603 100644 --- a/src/backend/commands/aggregatecmds.c +++ b/src/backend/commands/aggregatecmds.c @@ -338,14 +338,14 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, /* * There's little point in having a serialization/deserialization * function on aggregates that don't have an internal state, so let's - * just disallow this as it may help clear up any confusion or needless - * authoring of these functions. + * just disallow this as it may help clear up any confusion or + * needless authoring of these functions. 
*/ if (transTypeId != INTERNALOID) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("a serialization type must only be specified when the aggregate transition data type is %s", - format_type_be(INTERNALOID)))); + format_type_be(INTERNALOID)))); serialTypeId = typenameTypeId(NULL, serialType); @@ -358,15 +358,15 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, /* * We disallow INTERNAL serialType as the whole point of the - * serialized types is to allow the aggregate state to be output, - * and we cannot output INTERNAL. This check, combined with the one - * above ensures that the trans type and serialization type are not the + * serialized types is to allow the aggregate state to be output, and + * we cannot output INTERNAL. This check, combined with the one above + * ensures that the trans type and serialization type are not the * same. */ if (serialTypeId == INTERNALOID) ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("aggregate serialization data type cannot be %s", + errmsg("aggregate serialization data type cannot be %s", format_type_be(serialTypeId)))); /* @@ -392,14 +392,14 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, */ if (serialfuncName != NIL) ereport(ERROR, - (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("must specify serialization type when specifying serialization function"))); + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("must specify serialization type when specifying serialization function"))); /* likewise for the deserialization function */ if (deserialfuncName != NIL) ereport(ERROR, - (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), - errmsg("must specify serialization type when specifying deserialization function"))); + (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), + errmsg("must specify serialization type when specifying deserialization function"))); } /* @@ -493,7 +493,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, mfinalfuncExtraArgs, sortoperatorName, /* sort operator name */ transTypeId, /* transition data type */ - serialTypeId, /* serialization data type */ + serialTypeId, /* serialization data type */ transSpace, /* transition space */ mtransTypeId, /* transition data type */ mtransSpace, /* transition space */ diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 4b08cb832e9..1301bcb5e82 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -400,18 +400,17 @@ ExecRenameStmt(RenameStmt *stmt) ObjectAddress ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddress) { - ObjectAddress address; - ObjectAddress refAddr; - Relation rel; + ObjectAddress address; + ObjectAddress refAddr; + Relation rel; address = get_object_address_rv(stmt->objectType, stmt->relation, stmt->objname, - stmt->objargs, &rel, AccessExclusiveLock, false); + stmt->objargs, &rel, AccessExclusiveLock, false); /* - * If a relation was involved, it would have been opened and locked. - * We don't need the relation here, but we'll retain the lock until - * commit. + * If a relation was involved, it would have been opened and locked. We + * don't need the relation here, but we'll retain the lock until commit. 
*/ if (rel) heap_close(rel, NoLock); @@ -630,8 +629,8 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid) oldNspOid = DatumGetObjectId(namespace); /* - * If the object is already in the correct namespace, we don't need - * to do anything except fire the object access hook. + * If the object is already in the correct namespace, we don't need to do + * anything except fire the object access hook. */ if (oldNspOid == nspOid) { diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c index 904dc1cbd19..9ac930ea8b1 100644 --- a/src/backend/commands/amcmds.c +++ b/src/backend/commands/amcmds.c @@ -138,7 +138,7 @@ RemoveAccessMethodById(Oid amOid) /* * get_am_type_oid - * Worker for various get_am_*_oid variants + * Worker for various get_am_*_oid variants * * If missing_ok is false, throw an error if access method not found. If * true, just return InvalidOid. @@ -188,7 +188,7 @@ get_index_am_oid(const char *amname, bool missing_ok) /* * get_am_oid - given an access method name, look up its OID. - * The type is not checked. + * The type is not checked. */ Oid get_am_oid(const char *amname, bool missing_ok) diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 97059e59c82..5fcedd78554 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -570,7 +570,7 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params, */ if (!inh) { - BlockNumber relallvisible; + BlockNumber relallvisible; visibilitymap_count(onerel, &relallvisible, NULL); diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c index ad9b8ba156c..175d4ab6850 100644 --- a/src/backend/commands/conversioncmds.c +++ b/src/backend/commands/conversioncmds.c @@ -85,8 +85,8 @@ CreateConversionCommand(CreateConversionStmt *stmt) if (get_func_rettype(funcoid) != VOIDOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("encoding conversion function %s must return type %s", - NameListToString(func_name), "void"))); + errmsg("encoding conversion function %s must return type %s", + NameListToString(func_name), "void"))); /* Check we have EXECUTE rights for the function */ aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE); diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 28dcd340017..f45b3304ae9 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -875,7 +875,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) if (is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY FROM not supported with row-level security"), + errmsg("COPY FROM not supported with row-level security"), errhint("Use INSERT statements instead."))); /* Build target list */ @@ -1399,16 +1399,16 @@ BeginCopy(bool is_from, { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DO INSTEAD NOTHING rules are not supported for COPY"))); + errmsg("DO INSTEAD NOTHING rules are not supported for COPY"))); } else if (list_length(rewritten) > 1) { - ListCell *lc; + ListCell *lc; /* examine queries to determine which error message to issue */ foreach(lc, rewritten) { - Query *q = (Query *) lfirst(lc); + Query *q = (Query *) lfirst(lc); if (q->querySource == QSRC_QUAL_INSTEAD_RULE) ereport(ERROR, @@ -1417,7 +1417,7 @@ BeginCopy(bool is_from, if (q->querySource == QSRC_NON_INSTEAD_RULE) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("DO ALSO rules are not supported for the COPY"))); + errmsg("DO 
ALSO rules are not supported for the COPY"))); } ereport(ERROR, @@ -1448,8 +1448,8 @@ BeginCopy(bool is_from, query->commandType == CMD_DELETE); ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY query must have a RETURNING clause"))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("COPY query must have a RETURNING clause"))); } /* plan the query */ diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 9d84b79ea09..e78e3b5b743 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -1419,7 +1419,7 @@ CreateExtensionInternal(CreateExtensionStmt *stmt, List *parents) CreateExtensionStmt *ces; ListCell *lc; ObjectAddress addr; - List *cascade_parents; + List *cascade_parents; /* Check extension name validity before trying to cascade */ check_valid_extension_name(curreq); diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c index 804bab2e1f5..eb531afd491 100644 --- a/src/backend/commands/foreigncmds.c +++ b/src/backend/commands/foreigncmds.c @@ -487,7 +487,7 @@ lookup_fdw_handler_func(DefElem *handler) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("function %s must return type %s", - NameListToString((List *) handler->arg), "fdw_handler"))); + NameListToString((List *) handler->arg), "fdw_handler"))); return handlerOid; } diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index 62e61a26749..6cddcbd02c3 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -217,21 +217,20 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString, RelationGetRelationName(matviewRel)); /* - * Check that there is a unique index with no WHERE clause on - * one or more columns of the materialized view if CONCURRENTLY - * is specified. + * Check that there is a unique index with no WHERE clause on one or more + * columns of the materialized view if CONCURRENTLY is specified. */ if (concurrent) { - List *indexoidlist = RelationGetIndexList(matviewRel); - ListCell *indexoidscan; + List *indexoidlist = RelationGetIndexList(matviewRel); + ListCell *indexoidscan; bool hasUniqueIndex = false; foreach(indexoidscan, indexoidlist) { Oid indexoid = lfirst_oid(indexoidscan); Relation indexRel; - Form_pg_index indexStruct; + Form_pg_index indexStruct; indexRel = index_open(indexoid, AccessShareLock); indexStruct = indexRel->rd_index; @@ -255,9 +254,9 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString, if (!hasUniqueIndex) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot refresh materialized view \"%s\" concurrently", - quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)), - RelationGetRelationName(matviewRel))), + errmsg("cannot refresh materialized view \"%s\" concurrently", + quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)), + RelationGetRelationName(matviewRel))), errhint("Create a unique index with no WHERE clause on one or more columns of the materialized view."))); } @@ -745,8 +744,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner, /* * There must be at least one unique index on the matview. * - * ExecRefreshMatView() checks that after taking the exclusive lock on - * the matview. So at least one unique index is guaranteed to exist here + * ExecRefreshMatView() checks that after taking the exclusive lock on the + * matview. 
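
For reference, the CONCURRENTLY prerequisite described above reduces to scanning the matview's index list for a unique, immediate, valid index with no WHERE clause. A condensed sketch using 9.6-era backend APIs (the helper name is illustrative, not from this commit):

    static bool
    matview_has_usable_unique_index(Relation matviewRel)
    {
        List       *indexoidlist = RelationGetIndexList(matviewRel);
        ListCell   *lc;
        bool        found = false;

        foreach(lc, indexoidlist)
        {
            Relation    indexRel = index_open(lfirst_oid(lc), AccessShareLock);
            Form_pg_index indexStruct = indexRel->rd_index;

            /* must be unique, immediate, valid, and have no predicate */
            if (indexStruct->indisunique &&
                indexStruct->indimmediate &&
                IndexIsValid(indexStruct) &&
                RelationGetIndexPredicate(indexRel) == NIL)
                found = true;
            index_close(indexRel, AccessShareLock);
            if (found)
                break;
        }
        list_free(indexoidlist);
        return found;
    }
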
So at least one unique index is guaranteed to exist here * because the lock is still being held. */ Assert(foundUniqueIndex); diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c index 11036241212..67d08d862b6 100644 --- a/src/backend/commands/operatorcmds.c +++ b/src/backend/commands/operatorcmds.c @@ -275,8 +275,8 @@ ValidateRestrictionEstimator(List *restrictionName) if (get_func_rettype(restrictionOid) != FLOAT8OID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("restriction estimator function %s must return type %s", - NameListToString(restrictionName), "float8"))); + errmsg("restriction estimator function %s must return type %s", + NameListToString(restrictionName), "float8"))); /* Require EXECUTE rights for the estimator */ aclresult = pg_proc_aclcheck(restrictionOid, GetUserId(), ACL_EXECUTE); @@ -321,8 +321,8 @@ ValidateJoinEstimator(List *joinName) if (get_func_rettype(joinOid) != FLOAT8OID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("join estimator function %s must return type %s", - NameListToString(joinName), "float8"))); + errmsg("join estimator function %s must return type %s", + NameListToString(joinName), "float8"))); /* Require EXECUTE rights for the estimator */ aclresult = pg_proc_aclcheck(joinOid, GetUserId(), ACL_EXECUTE); diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 93d15e477af..bc2e4af82a3 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -496,7 +496,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) /* Must own relation. */ if (pg_class_ownercheck(relid, GetUserId())) - noperm = false; /* user is allowed to modify this policy */ + noperm = false; /* user is allowed to modify this policy */ else ereport(WARNING, (errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED), @@ -511,15 +511,16 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) */ if (!noperm && num_roles > 0) { - int i, j; + int i, + j; Oid *roles = (Oid *) ARR_DATA_PTR(policy_roles); Datum *role_oids; char *qual_value; Node *qual_expr; - List *qual_parse_rtable = NIL; + List *qual_parse_rtable = NIL; char *with_check_value; Node *with_check_qual; - List *with_check_parse_rtable = NIL; + List *with_check_parse_rtable = NIL; Datum values[Natts_pg_policy]; bool isnull[Natts_pg_policy]; bool replaces[Natts_pg_policy]; @@ -536,15 +537,14 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) /* * All of the dependencies will be removed from the policy and then - * re-added. In order to get them correct, we need to extract out - * the expressions in the policy and construct a parsestate just - * enough to build the range table(s) to then pass to - * recordDependencyOnExpr(). + * re-added. In order to get them correct, we need to extract out the + * expressions in the policy and construct a parsestate just enough to + * build the range table(s) to then pass to recordDependencyOnExpr(). 
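
Concretely, the rebuild follows the same pattern for each stored expression; a sketch only, using 9.6 parser and dependency APIs, where rel, target, and value_datum stand in for the surrounding locals:

    qual_expr = stringToNode(TextDatumGetCString(value_datum));
    qual_pstate = make_parsestate(NULL);
    rte = addRangeTableEntryForRelation(qual_pstate, rel, NULL, false, false);
    addRTEtoQuery(qual_pstate, rte, false, true, true);
    /* re-record everything the expression depends on */
    recordDependencyOnExpr(&target, qual_expr, qual_pstate->p_rtable,
                           DEPENDENCY_NORMAL);
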
*/ /* Get policy qual, to update dependencies */ value_datum = heap_getattr(tuple, Anum_pg_policy_polqual, - RelationGetDescr(pg_policy_rel), &attr_isnull); + RelationGetDescr(pg_policy_rel), &attr_isnull); if (!attr_isnull) { ParseState *qual_pstate; @@ -566,7 +566,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) /* Get WITH CHECK qual, to update dependencies */ value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck, - RelationGetDescr(pg_policy_rel), &attr_isnull); + RelationGetDescr(pg_policy_rel), &attr_isnull); if (!attr_isnull) { ParseState *with_check_pstate; @@ -665,7 +665,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id) heap_close(pg_policy_rel, RowExclusiveLock); - return(noperm || num_roles > 0); + return (noperm || num_roles > 0); } /* @@ -996,8 +996,8 @@ AlterPolicy(AlterPolicyStmt *stmt) /* Get policy command */ polcmd_datum = heap_getattr(policy_tuple, Anum_pg_policy_polcmd, - RelationGetDescr(pg_policy_rel), - &polcmd_isnull); + RelationGetDescr(pg_policy_rel), + &polcmd_isnull); Assert(!polcmd_isnull); polcmd = DatumGetChar(polcmd_datum); @@ -1029,15 +1029,15 @@ AlterPolicy(AlterPolicyStmt *stmt) } else { - Oid *roles; + Oid *roles; Datum roles_datum; bool attr_isnull; ArrayType *policy_roles; /* - * We need to pull the set of roles this policy applies to from - * what's in the catalog, so that we can recreate the dependencies - * correctly for the policy. + * We need to pull the set of roles this policy applies to from what's + * in the catalog, so that we can recreate the dependencies correctly + * for the policy. */ roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles, @@ -1065,13 +1065,13 @@ AlterPolicy(AlterPolicyStmt *stmt) } else { - Datum value_datum; - bool attr_isnull; + Datum value_datum; + bool attr_isnull; /* * We need to pull the USING expression and build the range table for - * the policy from what's in the catalog, so that we can recreate - * the dependencies correctly for the policy. + * the policy from what's in the catalog, so that we can recreate the + * dependencies correctly for the policy. 
*/ /* Check if the policy has a USING expr */ @@ -1106,8 +1106,8 @@ AlterPolicy(AlterPolicyStmt *stmt) } else { - Datum value_datum; - bool attr_isnull; + Datum value_datum; + bool attr_isnull; /* * We need to pull the WITH CHECK expression and build the range table diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 0515f4d3df9..761d08f604b 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -114,8 +114,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) if (funcrettype != LANGUAGE_HANDLEROID) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function %s must return type %s", - NameListToString(funcname), "language_handler"))); + errmsg("function %s must return type %s", + NameListToString(funcname), "language_handler"))); } else { @@ -285,8 +285,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) else ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function %s must return type %s", - NameListToString(stmt->plhandler), "language_handler"))); + errmsg("function %s must return type %s", + NameListToString(stmt->plhandler), "language_handler"))); } /* validate the inline function */ diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 6f728ff0fc9..99a659a1027 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -532,8 +532,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * can skip this for internally generated triggers, since the name * modification above should be sufficient. * - * NOTE that this is cool only because we have ShareRowExclusiveLock on the - * relation, so the trigger set won't be changing underneath us. + * NOTE that this is cool only because we have ShareRowExclusiveLock on + * the relation, so the trigger set won't be changing underneath us. 
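
(For example, ShareRowExclusiveLock conflicts with itself, so a concurrent CREATE TRIGGER on the same relation, which must take the same lock, waits; any operation that drops or rewrites the trigger set needs at least as strong a lock.)
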
*/ if (!isInternal) { diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 71d4df9c797..ce042110679 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -450,8 +450,8 @@ DefineType(List *names, List *parameters) { /* backwards-compatibility hack */ ereport(WARNING, - (errmsg("changing return type of function %s from %s to %s", - NameListToString(inputName), "opaque", typeName))); + (errmsg("changing return type of function %s from %s to %s", + NameListToString(inputName), "opaque", typeName))); SetFunctionReturnType(inputOid, typoid); } else @@ -467,15 +467,15 @@ DefineType(List *names, List *parameters) { /* backwards-compatibility hack */ ereport(WARNING, - (errmsg("changing return type of function %s from %s to %s", - NameListToString(outputName), "opaque", "cstring"))); + (errmsg("changing return type of function %s from %s to %s", + NameListToString(outputName), "opaque", "cstring"))); SetFunctionReturnType(outputOid, CSTRINGOID); } else ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type output function %s must return type %s", - NameListToString(outputName), "cstring"))); + errmsg("type output function %s must return type %s", + NameListToString(outputName), "cstring"))); } if (receiveOid) { @@ -492,8 +492,8 @@ DefineType(List *names, List *parameters) if (resulttype != BYTEAOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type send function %s must return type %s", - NameListToString(sendName), "bytea"))); + errmsg("type send function %s must return type %s", + NameListToString(sendName), "bytea"))); } /* @@ -1888,8 +1888,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid) if (get_func_rettype(procOid) != BOOLOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type analyze function %s must return type %s", - NameListToString(procname), "boolean"))); + errmsg("type analyze function %s must return type %s", + NameListToString(procname), "boolean"))); return procOid; } @@ -3313,9 +3313,9 @@ AlterTypeOwner_oid(Oid typeOid, Oid newOwnerId, bool hasDependEntry) typTup = (Form_pg_type) GETSTRUCT(tup); /* - * If it's a composite type, invoke ATExecChangeOwner so that we fix up the - * pg_class entry properly. That will call back to AlterTypeOwnerInternal - * to take care of the pg_type entry(s). + * If it's a composite type, invoke ATExecChangeOwner so that we fix up + * the pg_class entry properly. That will call back to + * AlterTypeOwnerInternal to take care of the pg_type entry(s). 
*/ if (typTup->typtype == TYPTYPE_COMPOSITE) ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock); diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index f0ac636b9b7..b6ea95061d0 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -302,7 +302,7 @@ CreateRole(CreateRoleStmt *stmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to change bypassrls attribute"))); + errmsg("must be superuser to change bypassrls attribute"))); } else { @@ -320,8 +320,8 @@ CreateRole(CreateRoleStmt *stmt) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role name \"%s\" is reserved", - stmt->role), - errdetail("Role names starting with \"pg_\" are reserved."))); + stmt->role), + errdetail("Role names starting with \"pg_\" are reserved."))); /* * Check the pg_authid relation to be certain the role doesn't already @@ -977,7 +977,7 @@ DropRole(DropRoleStmt *stmt) if (rolspec->roletype != ROLESPEC_CSTRING) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("cannot use special role specifier in DROP ROLE"))); + errmsg("cannot use special role specifier in DROP ROLE"))); role = rolspec->rolename; tuple = SearchSysCache1(AUTHNAME, PointerGetDatum(role)); @@ -1167,22 +1167,22 @@ RenameRole(const char *oldname, const char *newname) errmsg("current user cannot be renamed"))); /* - * Check that the user is not trying to rename a system role and - * not trying to rename a role into the reserved "pg_" namespace. + * Check that the user is not trying to rename a system role and not + * trying to rename a role into the reserved "pg_" namespace. */ if (IsReservedName(NameStr(authform->rolname))) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role name \"%s\" is reserved", - NameStr(authform->rolname)), - errdetail("Role names starting with \"pg_\" are reserved."))); + NameStr(authform->rolname)), + errdetail("Role names starting with \"pg_\" are reserved."))); if (IsReservedName(newname)) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role name \"%s\" is reserved", - newname), - errdetail("Role names starting with \"pg_\" are reserved."))); + newname), + errdetail("Role names starting with \"pg_\" are reserved."))); /* make sure the new name doesn't exist */ if (SearchSysCacheExists1(AUTHNAME, CStringGetDatum(newname))) diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 784c3e93564..0010ca9a801 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -1192,9 +1192,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, } /* - * If the all-visible page is turned out to be all-frozen but not marked, - * we should so mark it. Note that all_frozen is only valid if all_visible - * is true, so we must check both. + * If the all-visible page is turned out to be all-frozen but not + * marked, we should so mark it. Note that all_frozen is only valid + * if all_visible is true, so we must check both. 
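
For context, the branch below ends in a visibilitymap_set() call that adds the all-frozen bit; roughly this call shape under the 9.6 visibility-map API (the locals are those of lazy_scan_heap()):

    visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr, vmbuffer,
                      InvalidTransactionId, VISIBILITYMAP_ALL_FROZEN);
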
*/ else if (all_visible_according_to_vm && all_visible && all_frozen && !VM_ALL_FROZEN(onerel, blkno, &vmbuffer)) @@ -1660,7 +1660,7 @@ should_attempt_truncation(LVRelStats *vacrelstats) possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages; if (possibly_freeable > 0 && (possibly_freeable >= REL_TRUNCATE_MINIMUM || - possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) && + possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) && old_snapshot_threshold < 0) return true; else diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index f801faacd29..962d75db6e4 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -880,9 +880,9 @@ check_role(char **newval, void **extra, GucSource source) ReleaseSysCache(roleTup); /* - * Verify that session user is allowed to become this role, but - * skip this in parallel mode, where we must blindly recreate the - * parallel leader's state. + * Verify that session user is allowed to become this role, but skip + * this in parallel mode, where we must blindly recreate the parallel + * leader's state. */ if (!InitializingParallelWorker && !is_member_of_role(GetSessionUserId(), roleid)) diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index 0c8e9399052..4a978adea71 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -444,10 +444,9 @@ ExecSupportsBackwardScan(Plan *node) return false; /* - * Parallel-aware nodes return a subset of the tuples in each worker, - * and in general we can't expect to have enough bookkeeping state to - * know which ones we returned in this worker as opposed to some other - * worker. + * Parallel-aware nodes return a subset of the tuples in each worker, and + * in general we can't expect to have enough bookkeeping state to know + * which ones we returned in this worker as opposed to some other worker. 
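
(For example, a parallel-aware SeqScan hands out heap pages to workers on demand, so each worker sees an unpredictable subset of the relation and has no meaningful notion of "the previous tuple" to step back to.)
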
*/ if (node->parallel_aware) return false; diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c index a2eeeb6f6cd..c819d19db42 100644 --- a/src/backend/executor/execIndexing.c +++ b/src/backend/executor/execIndexing.c @@ -725,7 +725,7 @@ retry: { TransactionId xwait; ItemPointerData ctid_wait; - XLTW_Oper reason_wait; + XLTW_Oper reason_wait; Datum existing_values[INDEX_MAX_KEYS]; bool existing_isnull[INDEX_MAX_KEYS]; char *error_new; diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index b5ced388d20..32bb3f92054 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -1851,25 +1851,25 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo, if (wco->polname != NULL) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("new row violates row-level security policy \"%s\" for table \"%s\"", - wco->polname, wco->relname))); + errmsg("new row violates row-level security policy \"%s\" for table \"%s\"", + wco->polname, wco->relname))); else ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("new row violates row-level security policy for table \"%s\"", - wco->relname))); + errmsg("new row violates row-level security policy for table \"%s\"", + wco->relname))); break; case WCO_RLS_CONFLICT_CHECK: if (wco->polname != NULL) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"", - wco->polname, wco->relname))); + errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"", + wco->polname, wco->relname))); else ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("new row violates row-level security policy (USING expression) for table \"%s\"", - wco->relname))); + errmsg("new row violates row-level security policy (USING expression) for table \"%s\"", + wco->relname))); break; default: elog(ERROR, "unrecognized WCO kind: %u", wco->kind); diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index f03cd9b07b3..6de90705e48 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -83,7 +83,7 @@ struct SharedExecutorInstrumentation typedef struct ExecParallelEstimateContext { ParallelContext *pcxt; - int nnodes; + int nnodes; } ExecParallelEstimateContext; /* Context object for ExecParallelInitializeDSM. */ @@ -91,7 +91,7 @@ typedef struct ExecParallelInitializeDSMContext { ParallelContext *pcxt; SharedExecutorInstrumentation *instrumentation; - int nnodes; + int nnodes; } ExecParallelInitializeDSMContext; /* Helper functions that run in the parallel leader. */ @@ -99,11 +99,11 @@ static char *ExecSerializePlan(Plan *plan, EState *estate); static bool ExecParallelEstimate(PlanState *node, ExecParallelEstimateContext *e); static bool ExecParallelInitializeDSM(PlanState *node, - ExecParallelInitializeDSMContext *d); + ExecParallelInitializeDSMContext *d); static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize); static bool ExecParallelRetrieveInstrumentation(PlanState *planstate, - SharedExecutorInstrumentation *instrumentation); + SharedExecutorInstrumentation *instrumentation); /* Helper functions that run in the parallel worker. */ static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc); @@ -387,12 +387,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) /* Estimate space for tuple queues. 
*/ shm_toc_estimate_chunk(&pcxt->estimator, - mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers)); + mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers)); shm_toc_estimate_keys(&pcxt->estimator, 1); /* - * Give parallel-aware nodes a chance to add to the estimates, and get - * a count of how many PlanState nodes there are. + * Give parallel-aware nodes a chance to add to the estimates, and get a + * count of how many PlanState nodes there are. */ e.pcxt = pcxt; e.nnodes = 0; @@ -444,14 +444,14 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false); /* - * If instrumentation options were supplied, allocate space for the - * data. It only gets partially initialized here; the rest happens - * during ExecParallelInitializeDSM. + * If instrumentation options were supplied, allocate space for the data. + * It only gets partially initialized here; the rest happens during + * ExecParallelInitializeDSM. */ if (estate->es_instrument) { Instrumentation *instrument; - int i; + int i; instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len); instrumentation->instrument_options = estate->es_instrument; @@ -493,13 +493,13 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers) */ static bool ExecParallelRetrieveInstrumentation(PlanState *planstate, - SharedExecutorInstrumentation *instrumentation) + SharedExecutorInstrumentation *instrumentation) { Instrumentation *instrument; - int i; - int n; - int ibytes; - int plan_node_id = planstate->plan->plan_node_id; + int i; + int n; + int ibytes; + int plan_node_id = planstate->plan->plan_node_id; /* Find the instumentation for this node. */ for (i = 0; i < instrumentation->num_plan_nodes; ++i) @@ -532,7 +532,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate, void ExecParallelFinish(ParallelExecutorInfo *pei) { - int i; + int i; if (pei->finished) return; @@ -626,19 +626,19 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver, */ static bool ExecParallelReportInstrumentation(PlanState *planstate, - SharedExecutorInstrumentation *instrumentation) + SharedExecutorInstrumentation *instrumentation) { - int i; - int plan_node_id = planstate->plan->plan_node_id; + int i; + int plan_node_id = planstate->plan->plan_node_id; Instrumentation *instrument; InstrEndLoop(planstate->instrument); /* * If we shuffled the plan_node_id values in ps_instrument into sorted - * order, we could use binary search here. This might matter someday - * if we're pushing down sufficiently large plan trees. For now, do it - * the slow, dumb way. + * order, we could use binary search here. This might matter someday if + * we're pushing down sufficiently large plan trees. For now, do it the + * slow, dumb way. */ for (i = 0; i < instrumentation->num_plan_nodes; ++i) if (instrumentation->plan_node_id[i] == plan_node_id) diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index cd93c045dcb..e02fba52329 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -497,8 +497,8 @@ init_execution_state(List *queryTree_list, stmt = queryTree->utilityStmt; else stmt = (Node *) pg_plan_query(queryTree, - fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0, - NULL); + fcache->readonly_func ? 
CURSOR_OPT_PARALLEL_OK : 0, + NULL); /* Precheck all commands for validity in a function */ if (IsA(stmt, TransactionStmt)) diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 0c1e4a3cb6e..c3a04ef7daa 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -491,9 +491,9 @@ static void finalize_aggregate(AggState *aggstate, AggStatePerGroup pergroupstate, Datum *resultVal, bool *resultIsNull); static void finalize_partialaggregate(AggState *aggstate, - AggStatePerAgg peragg, - AggStatePerGroup pergroupstate, - Datum *resultVal, bool *resultIsNull); + AggStatePerAgg peragg, + AggStatePerGroup pergroupstate, + Datum *resultVal, bool *resultIsNull); static void prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet); @@ -981,17 +981,18 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup) if (OidIsValid(pertrans->deserialfn_oid)) { /* - * Don't call a strict deserialization function with NULL input. - * A strict deserialization function and a null value means we skip - * calling the combine function for this state. We assume that this - * would be a waste of time and effort anyway so just skip it. + * Don't call a strict deserialization function with NULL input. A + * strict deserialization function and a null value means we skip + * calling the combine function for this state. We assume that + * this would be a waste of time and effort anyway so just skip + * it. */ if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0]) continue; else { - FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo; - MemoryContext oldContext; + FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo; + MemoryContext oldContext; dsinfo->arg[0] = slot->tts_values[0]; dsinfo->argnull[0] = slot->tts_isnull[0]; @@ -1423,14 +1424,14 @@ finalize_partialaggregate(AggState *aggstate, AggStatePerGroup pergroupstate, Datum *resultVal, bool *resultIsNull) { - AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno]; - MemoryContext oldContext; + AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno]; + MemoryContext oldContext; oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory); /* - * serialfn_oid will be set if we must serialize the input state - * before calling the combine function on the state. + * serialfn_oid will be set if we must serialize the input state before + * calling the combine function on the state. */ if (OidIsValid(pertrans->serialfn_oid)) { @@ -1443,6 +1444,7 @@ finalize_partialaggregate(AggState *aggstate, else { FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo; + fcinfo->arg[0] = pergroupstate->transValue; fcinfo->argnull[0] = pergroupstate->transValueIsNull; @@ -1459,7 +1461,7 @@ finalize_partialaggregate(AggState *aggstate, /* If result is pass-by-ref, make sure it is in the right context. */ if (!peragg->resulttypeByVal && !*resultIsNull && !MemoryContextContains(CurrentMemoryContext, - DatumGetPointer(*resultVal))) + DatumGetPointer(*resultVal))) *resultVal = datumCopy(*resultVal, peragg->resulttypeByVal, peragg->resulttypeLen); @@ -2627,21 +2629,21 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * * 1. An aggregate function appears more than once in query: * - * SELECT SUM(x) FROM ... HAVING SUM(x) > 0 + * SELECT SUM(x) FROM ... HAVING SUM(x) > 0 * - * Since the aggregates are the identical, we only need to calculate - * the calculate it once. Both aggregates will share the same 'aggno' - * value. 
+ * Since the aggregates are the identical, we only need to calculate + * the calculate it once. Both aggregates will share the same 'aggno' + * value. * * 2. Two different aggregate functions appear in the query, but the - * aggregates have the same transition function and initial value, but - * different final function: + * aggregates have the same transition function and initial value, but + * different final function: * - * SELECT SUM(x), AVG(x) FROM ... + * SELECT SUM(x), AVG(x) FROM ... * - * In this case we must create a new peragg for the varying aggregate, - * and need to call the final functions separately, but can share the - * same transition state. + * In this case we must create a new peragg for the varying aggregate, + * and need to call the final functions separately, but can share the + * same transition state. * * For either of these optimizations to be valid, the aggregate's * arguments must be the same, including any modifiers such as ORDER BY, @@ -2889,8 +2891,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) */ existing_transno = find_compatible_pertrans(aggstate, aggref, transfn_oid, aggtranstype, - serialfn_oid, deserialfn_oid, - initValue, initValueIsNull, + serialfn_oid, deserialfn_oid, + initValue, initValueIsNull, same_input_transnos); if (existing_transno != -1) { @@ -3366,9 +3368,9 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg, /* * The serialization and deserialization functions must match, if * present, as we're unable to share the trans state for aggregates - * which will serialize or deserialize into different formats. Remember - * that these will be InvalidOid if they're not required for this agg - * node. + * which will serialize or deserialize into different formats. + * Remember that these will be InvalidOid if they're not required for + * this agg node. */ if (aggserialfn != pertrans->serialfn_oid || aggdeserialfn != pertrans->deserialfn_oid) diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index 300f947d431..d886aaf64d6 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -285,8 +285,8 @@ ExecReScanForeignScan(ForeignScanState *node) /* * If chgParam of subnode is not null then plan will be re-scanned by - * first ExecProcNode. outerPlan may also be NULL, in which case there - * is nothing to rescan at all. + * first ExecProcNode. outerPlan may also be NULL, in which case there is + * nothing to rescan at all. */ if (outerPlan != NULL && outerPlan->chgParam == NULL) ExecReScan(outerPlan); diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c index 3834ed678cb..313b2344540 100644 --- a/src/backend/executor/nodeGather.c +++ b/src/backend/executor/nodeGather.c @@ -138,8 +138,8 @@ ExecGather(GatherState *node) /* * Initialize the parallel context and workers on first execution. We do * this on first execution rather than during node initialization, as it - * needs to allocate large dynamic segment, so it is better to do if it - * is really needed. + * needs to allocate large dynamic segment, so it is better to do if it is + * really needed. */ if (!node->initialized) { @@ -147,8 +147,8 @@ ExecGather(GatherState *node) Gather *gather = (Gather *) node->ps.plan; /* - * Sometimes we might have to run without parallelism; but if - * parallel mode is active then we can try to fire up some workers. 
+ * Sometimes we might have to run without parallelism; but if parallel + * mode is active then we can try to fire up some workers. */ if (gather->num_workers > 0 && IsInParallelMode()) { @@ -186,7 +186,7 @@ ExecGather(GatherState *node) } else { - /* No workers? Then never mind. */ + /* No workers? Then never mind. */ ExecShutdownGatherWorkers(node); } } @@ -314,7 +314,7 @@ gather_getnext(GatherState *gatherstate) static HeapTuple gather_readnext(GatherState *gatherstate) { - int waitpos = gatherstate->nextreader; + int waitpos = gatherstate->nextreader; for (;;) { @@ -330,8 +330,8 @@ gather_readnext(GatherState *gatherstate) tup = TupleQueueReaderNext(reader, true, &readerdone); /* - * If this reader is done, remove it. If all readers are done, - * clean up remaining worker state. + * If this reader is done, remove it. If all readers are done, clean + * up remaining worker state. */ if (readerdone) { @@ -402,7 +402,7 @@ ExecShutdownGatherWorkers(GatherState *node) /* Shut down tuple queue readers before shutting down workers. */ if (node->reader != NULL) { - int i; + int i; for (i = 0; i < node->nreaders; ++i) DestroyTupleQueueReader(node->reader[i]); @@ -452,10 +452,10 @@ void ExecReScanGather(GatherState *node) { /* - * Re-initialize the parallel workers to perform rescan of relation. - * We want to gracefully shutdown all the workers so that they - * should be able to propagate any error or other information to master - * backend before dying. Parallel context will be reused for rescan. + * Re-initialize the parallel workers to perform rescan of relation. We + * want to gracefully shutdown all the workers so that they should be able + * to propagate any error or other information to master backend before + * dying. Parallel context will be reused for rescan. */ ExecShutdownGatherWorkers(node); diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index e62c8aad657..af7b26c0ef0 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -1221,10 +1221,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate, /* * Note that it is possible that the target tuple has been modified in * this session, after the above heap_lock_tuple. We choose to not error - * out in that case, in line with ExecUpdate's treatment of similar - * cases. This can happen if an UPDATE is triggered from within - * ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by - * selecting from a wCTE in the ON CONFLICT's SET. + * out in that case, in line with ExecUpdate's treatment of similar cases. + * This can happen if an UPDATE is triggered from within ExecQual(), + * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a + * wCTE in the ON CONFLICT's SET. */ /* Execute UPDATE with projection */ @@ -1595,7 +1595,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* Initialize the usesFdwDirectModify flag */ resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i, - node->fdwDirectModifyPlans); + node->fdwDirectModifyPlans); /* * Verify result relation is a valid target for the current operation diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index f12921d1889..00bf3a58b1a 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -65,8 +65,8 @@ SeqNext(SeqScanState *node) if (scandesc == NULL) { /* - * We reach here if the scan is not parallel, or if we're executing - * a scan that was intended to be parallel serially. 
+ * We reach here if the scan is not parallel, or if we're executing a + * scan that was intended to be parallel serially. */ scandesc = heap_beginscan(node->ss.ss_currentRelation, estate->es_snapshot, @@ -145,7 +145,7 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags) * open that relation and acquire appropriate lock on it. */ currentRelation = ExecOpenScanRelation(estate, - ((SeqScan *) node->ss.ps.plan)->scanrelid, + ((SeqScan *) node->ss.ps.plan)->scanrelid, eflags); node->ss.ss_currentRelation = currentRelation; @@ -277,8 +277,8 @@ ExecReScanSeqScan(SeqScanState *node) scan = node->ss.ss_currentScanDesc; if (scan != NULL) - heap_rescan(scan, /* scan desc */ - NULL); /* new scan keys */ + heap_rescan(scan, /* scan desc */ + NULL); /* new scan keys */ ExecScanReScan((ScanState *) node); } @@ -316,7 +316,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node, ParallelContext *pcxt) { EState *estate = node->ss.ps.state; - ParallelHeapScanDesc pscan; + ParallelHeapScanDesc pscan; pscan = shm_toc_allocate(pcxt->toc, node->pscan_len); heap_parallelscan_initialize(pscan, @@ -336,7 +336,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node, void ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc) { - ParallelHeapScanDesc pscan; + ParallelHeapScanDesc pscan; pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id); node->ss.ss_currentScanDesc = diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index f06eebee0cd..d4c88a1f0ef 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -2220,8 +2220,8 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, /* build expression trees using actual argument & result types */ build_aggregate_transfn_expr(inputTypes, numArguments, - 0, /* no ordered-set window functions yet */ - false, /* no variadic window functions yet */ + 0, /* no ordered-set window functions yet */ + false, /* no variadic window functions yet */ wfunc->wintype, wfunc->inputcollid, transfn_oid, diff --git a/src/backend/executor/tqueue.c b/src/backend/executor/tqueue.c index 8abb1f16e45..a729372c740 100644 --- a/src/backend/executor/tqueue.c +++ b/src/backend/executor/tqueue.c @@ -44,13 +44,13 @@ typedef enum TQUEUE_REMAP_ARRAY, /* array */ TQUEUE_REMAP_RANGE, /* range */ TQUEUE_REMAP_RECORD /* composite type, named or anonymous */ -} RemapClass; +} RemapClass; typedef struct { int natts; RemapClass mapping[FLEXIBLE_ARRAY_MEMBER]; -} RemapInfo; +} RemapInfo; typedef struct { @@ -61,13 +61,13 @@ typedef struct char mode; TupleDesc tupledesc; RemapInfo *remapinfo; -} TQueueDestReceiver; +} TQueueDestReceiver; typedef struct RecordTypemodMap { int remotetypmod; int localtypmod; -} RecordTypemodMap; +} RecordTypemodMap; struct TupleQueueReader { @@ -81,19 +81,19 @@ struct TupleQueueReader #define TUPLE_QUEUE_MODE_CONTROL 'c' #define TUPLE_QUEUE_MODE_DATA 'd' -static void tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, +static void tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value); -static void tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value); -static void tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value); -static void tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value); -static void tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod, +static void tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value); +static void tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value); +static void 
tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value); +static void tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod, TupleDesc tupledesc); static void TupleQueueHandleControlMessage(TupleQueueReader *reader, Size nbytes, char *data); static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader, Size nbytes, HeapTupleHeader data); static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader, - TupleDesc tupledesc, RemapInfo * remapinfo, + TupleDesc tupledesc, RemapInfo *remapinfo, HeapTuple tuple); static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass, Datum value); @@ -212,7 +212,7 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self) * Invoke the appropriate walker function based on the given RemapClass. */ static void -tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value) +tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value) { check_stack_depth(); @@ -237,7 +237,7 @@ tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value) * contained therein. */ static void -tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value) +tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value) { HeapTupleHeader tup; Oid typeid; @@ -304,7 +304,7 @@ tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value) * contained therein. */ static void -tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value) +tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value) { ArrayType *arr = DatumGetArrayTypeP(value); Oid typeid = ARR_ELEMTYPE(arr); @@ -342,7 +342,7 @@ tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value) * contained therein. */ static void -tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value) +tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value) { RangeType *range = DatumGetRangeType(value); Oid typeid = RangeTypeGetOid(range); @@ -386,7 +386,7 @@ tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value) * already done so previously. */ static void -tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod, +tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod, TupleDesc tupledesc) { StringInfoData buf; @@ -613,7 +613,7 @@ TupleQueueHandleDataMessage(TupleQueueReader *reader, */ static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc, - RemapInfo * remapinfo, HeapTuple tuple) + RemapInfo *remapinfo, HeapTuple tuple) { Datum *values; bool *isnull; diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 43bb1343550..7d8fc3e54d0 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -1875,7 +1875,7 @@ CheckPAMAuth(Port *port, char *user, char *password) retval = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen, hostinfo, sizeof(hostinfo), NULL, 0, - port->hba->pam_use_hostname ? 0 : NI_NUMERICHOST | NI_NUMERICSERV); + port->hba->pam_use_hostname ? 
0 : NI_NUMERICHOST | NI_NUMERICSERV); if (retval != 0) { ereport(WARNING, @@ -1934,7 +1934,7 @@ CheckPAMAuth(Port *port, char *user, char *password) { ereport(LOG, (errmsg("pam_set_item(PAM_RHOST) failed: %s", - pam_strerror(pamh, retval)))); + pam_strerror(pamh, retval)))); pam_passwd = NULL; return STATUS_ERROR; } @@ -1996,8 +1996,8 @@ CheckPAMAuth(Port *port, char *user, char *password) static int CheckBSDAuth(Port *port, char *user) { - char *passwd; - int retval; + char *passwd; + int retval; /* Send regular password request to client, and get the response */ sendAuthRequest(port, AUTH_REQ_PASSWORD); @@ -2539,11 +2539,10 @@ CheckRADIUSAuth(Port *port) radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier)); /* - * RADIUS password attributes are calculated as: - * e[0] = p[0] XOR MD5(secret + Request Authenticator) - * for the first group of 16 octets, and then: - * e[i] = p[i] XOR MD5(secret + e[i-1]) - * for the following ones (if necessary) + * RADIUS password attributes are calculated as: e[0] = p[0] XOR + * MD5(secret + Request Authenticator) for the first group of 16 octets, + * and then: e[i] = p[i] XOR MD5(secret + e[i-1]) for the following ones + * (if necessary) */ encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH; cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH); @@ -2554,7 +2553,11 @@ CheckRADIUSAuth(Port *port) for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH) { memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH); - /* .. and for subsequent iterations the result of the previous XOR (calculated below) */ + + /* + * .. and for subsequent iterations the result of the previous XOR + * (calculated below) + */ md5trailer = encryptedpassword + i; if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i)) @@ -2565,7 +2568,7 @@ CheckRADIUSAuth(Port *port) return STATUS_ERROR; } - for (j = i; j < i+RADIUS_VECTOR_LENGTH; j++) + for (j = i; j < i + RADIUS_VECTOR_LENGTH; j++) { if (j < strlen(passwd)) encryptedpassword[j] = passwd[j] ^ encryptedpassword[j]; diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index 95cceeed7ad..f6adb155c6e 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -241,8 +241,8 @@ be_tls_init(void) (buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO))) ereport(FATAL, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("private key file \"%s\" has group or world access", - ssl_key_file), + errmsg("private key file \"%s\" has group or world access", + ssl_key_file), errdetail("File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root."))); #endif @@ -316,7 +316,7 @@ be_tls_init(void) else ereport(FATAL, (errmsg("could not load SSL certificate revocation list file \"%s\": %s", - ssl_crl_file, SSLerrmessage(ERR_get_error())))); + ssl_crl_file, SSLerrmessage(ERR_get_error())))); } } @@ -377,11 +377,12 @@ be_tls_open_server(Port *port) port->ssl_in_use = true; aloop: + /* * Prepare to call SSL_get_error() by clearing thread's OpenSSL error * queue. In general, the current thread's error queue must be empty - * before the TLS/SSL I/O operation is attempted, or SSL_get_error() - * will not work reliably. 
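
In code terms the discipline reads roughly as follows (plain OpenSSL calls, sketch only):

    ERR_clear_error();                  /* empty this thread's queue first */
    r = SSL_accept(port->ssl);
    err = SSL_get_error(port->ssl, r);  /* relies on an intact queue */
    ecode = ERR_get_error();            /* earliest safe point to drain it */
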
An extension may have failed to clear the + * before the TLS/SSL I/O operation is attempted, or SSL_get_error() will + * not work reliably. An extension may have failed to clear the * per-thread error queue following another call to an OpenSSL I/O * routine. */ @@ -393,12 +394,11 @@ aloop: /* * Other clients of OpenSSL in the backend may fail to call - * ERR_get_error(), but we always do, so as to not cause problems - * for OpenSSL clients that don't call ERR_clear_error() - * defensively. Be sure that this happens by calling now. - * SSL_get_error() relies on the OpenSSL per-thread error queue - * being intact, so this is the earliest possible point - * ERR_get_error() may be called. + * ERR_get_error(), but we always do, so as to not cause problems for + * OpenSSL clients that don't call ERR_clear_error() defensively. Be + * sure that this happens by calling now. SSL_get_error() relies on + * the OpenSSL per-thread error queue being intact, so this is the + * earliest possible point ERR_get_error() may be called. */ ecode = ERR_get_error(); switch (err) diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c index 29297e72999..cdd07d577b0 100644 --- a/src/backend/libpq/be-secure.c +++ b/src/backend/libpq/be-secure.c @@ -140,26 +140,26 @@ retry: /* In blocking mode, wait until the socket is ready */ if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN)) { - WaitEvent event; + WaitEvent event; Assert(waitfor); ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL); - WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1); + WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1); /* * If the postmaster has died, it's not safe to continue running, * because it is the postmaster's job to kill us if some other backend * exists uncleanly. Moreover, we won't run very well in this state; * helper processes like walwriter and the bgwriter will exit, so - * performance may be poor. Finally, if we don't exit, pg_ctl will - * be unable to restart the postmaster without manual intervention, - * so no new connections can be accepted. Exiting clears the deck - * for a postmaster restart. + * performance may be poor. Finally, if we don't exit, pg_ctl will be + * unable to restart the postmaster without manual intervention, so no + * new connections can be accepted. Exiting clears the deck for a + * postmaster restart. * - * (Note that we only make this check when we would otherwise sleep - * on our latch. We might still continue running for a while if the + * (Note that we only make this check when we would otherwise sleep on + * our latch. We might still continue running for a while if the * postmaster is killed in mid-query, or even through multiple queries * if we never have to wait for read. We don't want to burn too many * cycles checking for this very rare condition, and this should cause @@ -168,7 +168,7 @@ retry: if (event.events & WL_POSTMASTER_DEATH) ereport(FATAL, (errcode(ERRCODE_ADMIN_SHUTDOWN), - errmsg("terminating connection due to unexpected postmaster exit"))); + errmsg("terminating connection due to unexpected postmaster exit"))); /* Handle interrupt. 
*/ if (event.events & WL_LATCH_SET) @@ -241,19 +241,19 @@ retry: if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN)) { - WaitEvent event; + WaitEvent event; Assert(waitfor); ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL); - WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1); + WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1); /* See comments in secure_read. */ if (event.events & WL_POSTMASTER_DEATH) ereport(FATAL, (errcode(ERRCODE_ADMIN_SHUTDOWN), - errmsg("terminating connection due to unexpected postmaster exit"))); + errmsg("terminating connection due to unexpected postmaster exit"))); /* Handle interrupt. */ if (event.events & WL_LATCH_SET) diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 8d6eb0b7bbf..ba42753c067 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -1174,7 +1174,7 @@ pq_startmsgread(void) if (PqCommReadingMsg) ereport(FATAL, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("terminating connection because protocol synchronization was lost"))); + errmsg("terminating connection because protocol synchronization was lost"))); PqCommReadingMsg = true; } diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c index 350210b0064..3225c1fa0e7 100644 --- a/src/backend/libpq/pqmq.c +++ b/src/backend/libpq/pqmq.c @@ -143,9 +143,9 @@ mq_putmessage(char msgtype, const char *s, size_t len) /* * If the message queue is already gone, just ignore the message. This - * doesn't necessarily indicate a problem; for example, DEBUG messages - * can be generated late in the shutdown sequence, after all DSMs have - * already been detached. + * doesn't necessarily indicate a problem; for example, DEBUG messages can + * be generated late in the shutdown sequence, after all DSMs have already + * been detached. */ if (pq_mq == NULL) return 0; diff --git a/src/backend/main/main.c b/src/backend/main/main.c index da86c6243f1..a13c446f891 100644 --- a/src/backend/main/main.c +++ b/src/backend/main/main.c @@ -270,19 +270,22 @@ startup_hacks(const char *progname) SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX); #if defined(_M_AMD64) && _MSC_VER == 1800 + /* - * Avoid crashing in certain floating-point operations if - * we were compiled for x64 with MS Visual Studio 2013 and - * are running on Windows prior to 7/2008R2 SP1 on an - * AVX2-capable CPU. + * Avoid crashing in certain floating-point operations if we were + * compiled for x64 with MS Visual Studio 2013 and are running on + * Windows prior to 7/2008R2 SP1 on an AVX2-capable CPU. 
* - * Ref: https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions + * Ref: + * https://connect.microsoft.com/VisualStudio/feedback/details/811093/v + * isual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instruction + * s */ if (!IsWindows7SP1OrGreater()) { _set_FMA3_enable(0); } -#endif /* defined(_M_AMD64) && _MSC_VER == 1800 */ +#endif /* defined(_M_AMD64) && _MSC_VER == 1800 */ } #endif /* WIN32 */ diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index 1ac51a7b2fc..5facd439cac 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -3499,7 +3499,7 @@ planstate_tree_walker(PlanState *planstate, return true; break; case T_CustomScan: - foreach (lc, ((CustomScanState *) planstate)->custom_ps) + foreach(lc, ((CustomScanState *) planstate)->custom_ps) { if (walker((PlanState *) lfirst(lc), context)) return true; diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c index d07974d3b0c..d7d513e78f0 100644 --- a/src/backend/nodes/params.c +++ b/src/backend/nodes/params.c @@ -94,8 +94,8 @@ copyParamList(ParamListInfo from) Size EstimateParamListSpace(ParamListInfo paramLI) { - int i; - Size sz = sizeof(int); + int i; + Size sz = sizeof(int); if (paramLI == NULL || paramLI->numParams <= 0) return sz; @@ -119,7 +119,7 @@ EstimateParamListSpace(ParamListInfo paramLI) typeOid = prm->ptype; } - sz = add_size(sz, sizeof(Oid)); /* space for type OID */ + sz = add_size(sz, sizeof(Oid)); /* space for type OID */ sz = add_size(sz, sizeof(uint16)); /* space for pflags */ /* space for datum/isnull */ @@ -132,7 +132,7 @@ EstimateParamListSpace(ParamListInfo paramLI) typByVal = true; } sz = add_size(sz, - datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen)); + datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen)); } return sz; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index 6f28047d849..c401762a39b 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -1836,8 +1836,8 @@ _readCustomScan(void) READ_BITMAPSET_FIELD(custom_relids); /* Lookup CustomScanMethods by CustomName */ - token = pg_strtok(&length); /* skip methods: */ - token = pg_strtok(&length); /* CustomName */ + token = pg_strtok(&length); /* skip methods: */ + token = pg_strtok(&length); /* CustomName */ custom_name = nullable_string(token, length); methods = GetCustomScanMethods(custom_name, false); local_node->methods = methods; @@ -2227,11 +2227,12 @@ _readExtensibleNode(void) { const ExtensibleNodeMethods *methods; ExtensibleNode *local_node; - const char *extnodename; + const char *extnodename; + READ_TEMP_LOCALS(); - token = pg_strtok(&length); /* skip: extnodename */ - token = pg_strtok(&length); /* get extnodename */ + token = pg_strtok(&length); /* skip: extnodename */ + token = pg_strtok(&length); /* get extnodename */ extnodename = nullable_string(token, length); if (!extnodename) diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 6deb2cf0c9c..ff5e39c1aad 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -163,8 +163,8 @@ make_one_rel(PlannerInfo *root, List *joinlist) set_base_rel_consider_startup(root); /* - * Generate access paths for the base rels. set_base_rel_sizes also - * sets the consider_parallel flag for each baserel, if appropriate. 
+ * Generate access paths for the base rels. set_base_rel_sizes also sets + * the consider_parallel flag for each baserel, if appropriate. */ set_base_rel_sizes(root); set_base_rel_pathlists(root); @@ -228,7 +228,7 @@ set_base_rel_consider_startup(PlannerInfo *root) /* * set_base_rel_sizes * Set the size estimates (rows and widths) for each base-relation entry. - * Also determine whether to consider parallel paths for base relations. + * Also determine whether to consider parallel paths for base relations. * * We do this in a separate pass over the base rels so that rowcount * estimates are available for parameterized path generation, and also so @@ -509,6 +509,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, switch (rte->rtekind) { case RTE_RELATION: + /* * Currently, parallel workers can't access the leader's temporary * tables. We could possibly relax this if the wrote all of its @@ -528,7 +529,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, */ if (rte->tablesample != NULL) { - Oid proparallel = func_parallel(rte->tablesample->tsmhandler); + Oid proparallel = func_parallel(rte->tablesample->tsmhandler); if (proparallel != PROPARALLEL_SAFE) return; @@ -557,14 +558,15 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, break; case RTE_SUBQUERY: + /* * Subplans currently aren't passed to workers. Even if they - * were, the subplan might be using parallelism internally, and - * we can't support nested Gather nodes at present. Finally, - * we don't have a good way of knowing whether the subplan - * involves any parallel-restricted operations. It would be - * nice to relax this restriction some day, but it's going to - * take a fair amount of work. + * were, the subplan might be using parallelism internally, and we + * can't support nested Gather nodes at present. Finally, we + * don't have a good way of knowing whether the subplan involves + * any parallel-restricted operations. It would be nice to relax + * this restriction some day, but it's going to take a fair amount + * of work. */ return; @@ -580,6 +582,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, break; case RTE_VALUES: + /* * The data for a VALUES clause is stored in the plan tree itself, * so scanning it in a worker is fine. @@ -587,6 +590,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, break; case RTE_CTE: + /* * CTE tuplestores aren't shared among parallel workers, so we * force all CTE scans to happen in the leader. Also, populating @@ -598,8 +602,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, } /* - * If there's anything in baserestrictinfo that's parallel-restricted, - * we give up on parallelizing access to this relation. We could consider + * If there's anything in baserestrictinfo that's parallel-restricted, we + * give up on parallelizing access to this relation. We could consider * instead postponing application of the restricted quals until we're * above all the parallelism in the plan tree, but it's not clear that * this would be a win in very many cases, and it might be tricky to make @@ -609,8 +613,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel, return; /* - * If the relation's outputs are not parallel-safe, we must give up. - * In the common case where the relation only outputs Vars, this check is + * If the relation's outputs are not parallel-safe, we must give up. In + * the common case where the relation only outputs Vars, this check is * very cheap; otherwise, we have to do more work. 
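
(The expensive case walks the relation's target expressions with the parallel-hazard walker in optimizer/util/clauses.c, looking for parallel-restricted functions.)
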
*/ if (rel->reltarget_has_non_vars && @@ -1251,8 +1255,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, int parallel_workers = 0; /* - * Decide on the number of workers to request for this append path. For - * now, we just use the maximum value from among the members. It + * Decide on the number of workers to request for this append path. + * For now, we just use the maximum value from among the members. It * might be useful to use a higher number if the Append node were * smart enough to spread out the workers, but it currently isn't. */ @@ -2160,8 +2164,8 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) * Run generate_gather_paths() for each just-processed joinrel. We * could not do this earlier because both regular and partial paths * can get added to a particular joinrel at multiple times within - * join_search_one_level. After that, we're done creating paths - * for the joinrel, so run set_cheapest(). + * join_search_one_level. After that, we're done creating paths for + * the joinrel, so run set_cheapest(). */ foreach(lc, root->join_rel_level[lev]) { diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 52df17fe694..ab8df76a6ed 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -1428,15 +1428,14 @@ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path) * We might not really need a Result node here. There are several ways * that this can happen. For example, MergeAppend doesn't project, so we * would have thought that we needed a projection to attach resjunk sort - * columns to its output ... but create_merge_append_plan might have - * added those same resjunk sort columns to both MergeAppend and its - * children. Alternatively, apply_projection_to_path might have created - * a projection path as the subpath of a Gather node even though the - * subpath was projection-capable. So, if the subpath is capable of - * projection or the desired tlist is the same expression-wise as the - * subplan's, just jam it in there. We'll have charged for a Result that - * doesn't actually appear in the plan, but that's better than having a - * Result we don't need. + * columns to its output ... but create_merge_append_plan might have added + * those same resjunk sort columns to both MergeAppend and its children. + * Alternatively, apply_projection_to_path might have created a projection + * path as the subpath of a Gather node even though the subpath was + * projection-capable. So, if the subpath is capable of projection or the + * desired tlist is the same expression-wise as the subplan's, just jam it + * in there. We'll have charged for a Result that doesn't actually appear + * in the plan, but that's better than having a Result we don't need. */ if (is_projection_capable_path(best_path->subpath) || tlist_same_exprs(tlist, subplan->targetlist)) @@ -3248,8 +3247,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, /* * If a join between foreign relations was pushed down, remember it. The * push-down safety of the join depends upon the server and user mapping - * being the same. That can change between planning and execution time, in which - * case the plan should be invalidated. + * being the same. That can change between planning and execution time, in + * which case the plan should be invalidated.
*/ if (scan_relid == 0) root->glob->hasForeignJoin = true; @@ -3257,8 +3256,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, /* * Replace any outer-relation variables with nestloop params in the qual, * fdw_exprs and fdw_recheck_quals expressions. We do this last so that - * the FDW doesn't have to be involved. (Note that parts of fdw_exprs - * or fdw_recheck_quals could have come from join clauses, so doing this + * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or + * fdw_recheck_quals could have come from join clauses, so doing this * beforehand on the scan_clauses wouldn't work.) We assume * fdw_scan_tlist contains no such variables. */ @@ -3279,8 +3278,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, * 0, but there can be no Var with relid 0 in the rel's targetlist or the * restriction clauses, so we skip this in that case. Note that any such * columns in base relations that were joined are assumed to be contained - * in fdw_scan_tlist.) This is a bit of a kluge and might go away someday, - * so we intentionally leave it out of the API presented to FDWs. + * in fdw_scan_tlist.) This is a bit of a kluge and might go away + * someday, so we intentionally leave it out of the API presented to FDWs. */ scan_plan->fsSystemCol = false; if (scan_relid > 0) @@ -5899,7 +5898,7 @@ make_gather(List *qptlist, plan->righttree = NULL; node->num_workers = nworkers; node->single_copy = single_copy; - node->invisible = false; + node->invisible = false; return node; } diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index ba0c0ecae9c..54c04404361 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -108,10 +108,10 @@ static double get_number_of_groups(PlannerInfo *root, List *rollup_lists, List *rollup_groupclauses); static void set_grouped_rel_consider_parallel(PlannerInfo *root, - RelOptInfo *grouped_rel, - PathTarget *target); + RelOptInfo *grouped_rel, + PathTarget *target); static Size estimate_hashagg_tablesize(Path *path, AggClauseCosts *agg_costs, - double dNumGroups); + double dNumGroups); static RelOptInfo *create_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, PathTarget *target, @@ -141,7 +141,7 @@ static RelOptInfo *create_ordered_paths(PlannerInfo *root, static PathTarget *make_group_input_target(PlannerInfo *root, PathTarget *final_target); static PathTarget *make_partialgroup_input_target(PlannerInfo *root, - PathTarget *final_target); + PathTarget *final_target); static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist); static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists); static PathTarget *make_window_input_target(PlannerInfo *root, @@ -1777,8 +1777,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update, * findable from the PlannerInfo struct; anything else the FDW wants * to know should be obtainable via "root". * - * Note: CustomScan providers, as well as FDWs that don't want to - * use this hook, can use the create_upper_paths_hook; see below. + * Note: CustomScan providers, as well as FDWs that don't want to use + * this hook, can use the create_upper_paths_hook; see below. */ if (current_rel->fdwroutine && current_rel->fdwroutine->GetForeignUpperPaths) @@ -3196,8 +3196,8 @@ set_grouped_rel_consider_parallel(PlannerInfo *root, RelOptInfo *grouped_rel, /* * All that's left to check now is to make sure all aggregate functions - * support partial mode. 
If there's no aggregates then we can skip checking - * that. + * support partial mode. If there's no aggregates then we can skip + * checking that. */ if (!parse->hasAggs) grouped_rel->consider_parallel = true; @@ -3370,9 +3370,10 @@ create_grouping_paths(PlannerInfo *root, /* * Determine whether it's possible to perform sort-based implementations - * of grouping. (Note that if groupClause is empty, grouping_is_sortable() - * is trivially true, and all the pathkeys_contained_in() tests will - * succeed too, so that we'll consider every surviving input path.) + * of grouping. (Note that if groupClause is empty, + * grouping_is_sortable() is trivially true, and all the + * pathkeys_contained_in() tests will succeed too, so that we'll consider + * every surviving input path.) */ can_sort = grouping_is_sortable(parse->groupClause); @@ -3408,7 +3409,7 @@ create_grouping_paths(PlannerInfo *root, */ if (grouped_rel->consider_parallel) { - Path *cheapest_partial_path = linitial(input_rel->partial_pathlist); + Path *cheapest_partial_path = linitial(input_rel->partial_pathlist); /* * Build target list for partial aggregate paths. We cannot reuse the @@ -3471,27 +3472,27 @@ create_grouping_paths(PlannerInfo *root, if (parse->hasAggs) add_partial_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - partial_grouping_target, - parse->groupClause ? AGG_SORTED : AGG_PLAIN, - parse->groupClause, - NIL, - &agg_partial_costs, - dNumPartialGroups, - false, - false, - true)); + create_agg_path(root, + grouped_rel, + path, + partial_grouping_target, + parse->groupClause ? AGG_SORTED : AGG_PLAIN, + parse->groupClause, + NIL, + &agg_partial_costs, + dNumPartialGroups, + false, + false, + true)); else add_partial_path(grouped_rel, (Path *) - create_group_path(root, - grouped_rel, - path, - partial_grouping_target, - parse->groupClause, - NIL, - dNumPartialGroups)); + create_group_path(root, + grouped_rel, + path, + partial_grouping_target, + parse->groupClause, + NIL, + dNumPartialGroups)); } } } @@ -3513,18 +3514,18 @@ create_grouping_paths(PlannerInfo *root, if (hashaggtablesize < work_mem * 1024L) { add_partial_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - cheapest_partial_path, - partial_grouping_target, - AGG_HASHED, - parse->groupClause, - NIL, - &agg_partial_costs, - dNumPartialGroups, - false, - false, - true)); + create_agg_path(root, + grouped_rel, + cheapest_partial_path, + partial_grouping_target, + AGG_HASHED, + parse->groupClause, + NIL, + &agg_partial_costs, + dNumPartialGroups, + false, + false, + true)); } } } @@ -3616,13 +3617,13 @@ create_grouping_paths(PlannerInfo *root, /* * Now generate a complete GroupAgg Path atop of the cheapest partial - * path. We need only bother with the cheapest path here, as the output - * of Gather is never sorted. + * path. We need only bother with the cheapest path here, as the + * output of Gather is never sorted. */ if (grouped_rel->partial_pathlist) { - Path *path = (Path *) linitial(grouped_rel->partial_pathlist); - double total_groups = path->rows * path->parallel_workers; + Path *path = (Path *) linitial(grouped_rel->partial_pathlist); + double total_groups = path->rows * path->parallel_workers; path = (Path *) create_gather_path(root, grouped_rel, @@ -3632,9 +3633,9 @@ create_grouping_paths(PlannerInfo *root, &total_groups); /* - * Gather is always unsorted, so we'll need to sort, unless there's - * no GROUP BY clause, in which case there will only be a single - * group. 
+ * Gather is always unsorted, so we'll need to sort, unless + * there's no GROUP BY clause, in which case there will only be a + * single group. */ if (parse->groupClause) path = (Path *) create_sort_path(root, @@ -3645,27 +3646,27 @@ create_grouping_paths(PlannerInfo *root, if (parse->hasAggs) add_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - target, - parse->groupClause ? AGG_SORTED : AGG_PLAIN, - parse->groupClause, - (List *) parse->havingQual, - &agg_final_costs, - dNumGroups, - true, - true, - true)); + create_agg_path(root, + grouped_rel, + path, + target, + parse->groupClause ? AGG_SORTED : AGG_PLAIN, + parse->groupClause, + (List *) parse->havingQual, + &agg_final_costs, + dNumGroups, + true, + true, + true)); else add_path(grouped_rel, (Path *) - create_group_path(root, - grouped_rel, - path, - target, - parse->groupClause, - (List *) parse->havingQual, - dNumGroups)); + create_group_path(root, + grouped_rel, + path, + target, + parse->groupClause, + (List *) parse->havingQual, + dNumGroups)); } } @@ -3678,15 +3679,15 @@ create_grouping_paths(PlannerInfo *root, /* * Provided that the estimated size of the hashtable does not exceed * work_mem, we'll generate a HashAgg Path, although if we were unable - * to sort above, then we'd better generate a Path, so that we at least - * have one. + * to sort above, then we'd better generate a Path, so that we at + * least have one. */ if (hashaggtablesize < work_mem * 1024L || grouped_rel->pathlist == NIL) { /* - * We just need an Agg over the cheapest-total input path, since input - * order won't matter. + * We just need an Agg over the cheapest-total input path, since + * input order won't matter. */ add_path(grouped_rel, (Path *) create_agg_path(root, grouped_rel, @@ -3704,12 +3705,12 @@ create_grouping_paths(PlannerInfo *root, /* * Generate a HashAgg Path atop of the cheapest partial path. Once - * again, we'll only do this if it looks as though the hash table won't - * exceed work_mem. + * again, we'll only do this if it looks as though the hash table + * won't exceed work_mem. 
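The partial-aggregation paths built above only work when every aggregate can run in partial mode, that is, when its transition states can be merged by a combine step and finalized once in the leader. A hedged sketch of why, using an invented AVG-style state rather than the real Aggref machinery:

#include <stdio.h>

/* Each worker accumulates a partial state; the leader combines them. */
typedef struct AvgState
{
	double		sum;
	long		count;
} AvgState;

static void
avg_accum(AvgState *s, double v)
{
	s->sum += v;
	s->count++;
}

static void
avg_combine(AvgState *a, const AvgState *b)
{
	a->sum += b->sum;			/* combine step: merge partial states */
	a->count += b->count;
}

static double
avg_final(const AvgState *s)
{
	return s->sum / s->count;	/* final function, applied once */
}

int
main(void)
{
	AvgState	w1 = {0}, w2 = {0}; /* two parallel workers */

	avg_accum(&w1, 1.0);
	avg_accum(&w1, 2.0);
	avg_accum(&w2, 3.0);
	avg_accum(&w2, 4.0);
	avg_combine(&w1, &w2);		/* leader merges the partial states */
	printf("avg = %g\n", avg_final(&w1));	/* 2.5 */
	return 0;
}

Note that the sorted (AGG_SORTED) and hashed (AGG_HASHED) partial paths above both rely on exactly this combine step; only the grouping strategy differs.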
*/ if (grouped_rel->partial_pathlist) { - Path *path = (Path *) linitial(grouped_rel->partial_pathlist); + Path *path = (Path *) linitial(grouped_rel->partial_pathlist); hashaggtablesize = estimate_hashagg_tablesize(path, &agg_final_costs, @@ -3717,7 +3718,7 @@ create_grouping_paths(PlannerInfo *root, if (hashaggtablesize < work_mem * 1024L) { - double total_groups = path->rows * path->parallel_workers; + double total_groups = path->rows * path->parallel_workers; path = (Path *) create_gather_path(root, grouped_rel, @@ -3727,18 +3728,18 @@ create_grouping_paths(PlannerInfo *root, &total_groups); add_path(grouped_rel, (Path *) - create_agg_path(root, - grouped_rel, - path, - target, - AGG_HASHED, - parse->groupClause, - (List *) parse->havingQual, - &agg_final_costs, - dNumGroups, - true, - true, - true)); + create_agg_path(root, + grouped_rel, + path, + target, + AGG_HASHED, + parse->groupClause, + (List *) parse->havingQual, + &agg_final_costs, + dNumGroups, + true, + true, + true)); } } } diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 266e83055b2..9b690cf66e9 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -2100,6 +2100,7 @@ search_indexed_tlist_for_partial_aggref(Aggref *aggref, indexed_tlist *itlist, continue; if (aggref->aggvariadic != tlistaggref->aggvariadic) continue; + /* * it would be harmless to compare aggcombine and aggpartial, but * it's also unnecessary diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 759566ad461..e7909eb5d59 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -101,7 +101,7 @@ typedef struct } has_parallel_hazard_arg; static bool aggregates_allow_partial_walker(Node *node, - partial_agg_context *context); + partial_agg_context *context); static bool contain_agg_clause_walker(Node *node, void *context); static bool count_agg_clauses_walker(Node *node, count_agg_clauses_context *context); @@ -112,9 +112,9 @@ static bool contain_mutable_functions_walker(Node *node, void *context); static bool contain_volatile_functions_walker(Node *node, void *context); static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context); static bool has_parallel_hazard_walker(Node *node, - has_parallel_hazard_arg *context); + has_parallel_hazard_arg *context); static bool parallel_too_dangerous(char proparallel, - has_parallel_hazard_arg *context); + has_parallel_hazard_arg *context); static bool typeid_is_temp(Oid typeid); static bool contain_nonstrict_functions_walker(Node *node, void *context); static bool contain_leaked_vars_walker(Node *node, void *context); @@ -446,7 +446,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context) if (aggref->aggdistinct || aggref->aggorder) { context->allowedtype = PAT_DISABLED; - return true; /* abort search */ + return true; /* abort search */ } aggTuple = SearchSysCache1(AGGFNOID, ObjectIdGetDatum(aggref->aggfnoid)); @@ -463,7 +463,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context) { ReleaseSysCache(aggTuple); context->allowedtype = PAT_DISABLED; - return true; /* abort search */ + return true; /* abort search */ } /* @@ -479,7 +479,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context) context->allowedtype = PAT_INTERNAL_ONLY; ReleaseSysCache(aggTuple); - return false; /* continue searching */ + return false; /* continue searching */ } return 
expression_tree_walker(node, aggregates_allow_partial_walker, (void *) context); @@ -1354,7 +1354,7 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context) bool has_parallel_hazard(Node *node, bool allow_restricted) { - has_parallel_hazard_arg context; + has_parallel_hazard_arg context; context.allow_restricted = allow_restricted; return has_parallel_hazard_walker(node, &context); @@ -1371,16 +1371,16 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) * recurse through Query objects so as to locate parallel-unsafe * constructs anywhere in the tree. * - * Later, we'll be called again for specific quals, possibly after - * some planning has been done, and we may encounter SubPlan, SubLink, - * or AlternativeSubLink nodes. Currently, there's no need to recurse - * through these; they can't be unsafe, since we've already cleared - * the entire query of unsafe operations, and they're definitely + * Later, we'll be called again for specific quals, possibly after some + * planning has been done, and we may encounter SubPlan, SubLink, or + * AlternativeSubLink nodes. Currently, there's no need to recurse + * through these; they can't be unsafe, since we've already cleared the + * entire query of unsafe operations, and they're definitely * parallel-restricted. */ if (IsA(node, Query)) { - Query *query = (Query *) node; + Query *query = (Query *) node; if (query->rowMarks != NULL) return true; @@ -1390,12 +1390,12 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) has_parallel_hazard_walker, context, 0); } - else if (IsA(node, SubPlan) || IsA(node, SubLink) || - IsA(node, AlternativeSubPlan) || IsA(node, Param)) + else if (IsA(node, SubPlan) ||IsA(node, SubLink) || + IsA(node, AlternativeSubPlan) ||IsA(node, Param)) { /* - * Since we don't have the ability to push subplans down to workers - * at present, we treat subplan references as parallel-restricted. + * Since we don't have the ability to push subplans down to workers at + * present, we treat subplan references as parallel-restricted. */ if (!context->allow_restricted) return true; @@ -1405,12 +1405,14 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) if (IsA(node, RestrictInfo)) { RestrictInfo *rinfo = (RestrictInfo *) node; + return has_parallel_hazard_walker((Node *) rinfo->clause, context); } /* * It is an error for a parallel worker to touch a temporary table in any - * way, so we can't handle nodes whose type is the rowtype of such a table. + * way, so we can't handle nodes whose type is the rowtype of such a + * table. */ if (!context->allow_restricted) { @@ -1534,7 +1536,8 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) foreach(opid, rcexpr->opnos) { - Oid opfuncid = get_opcode(lfirst_oid(opid)); + Oid opfuncid = get_opcode(lfirst_oid(opid)); + if (parallel_too_dangerous(func_parallel(opfuncid), context)) return true; } @@ -1558,7 +1561,7 @@ parallel_too_dangerous(char proparallel, has_parallel_hazard_arg *context) static bool typeid_is_temp(Oid typeid) { - Oid relid = get_typ_typrelid(typeid); + Oid relid = get_typ_typrelid(typeid); if (!OidIsValid(relid)) return false; @@ -1870,8 +1873,8 @@ contain_leaked_vars_walker(Node *node, void *context) /* * WHERE CURRENT OF doesn't contain function calls. Moreover, it * is important that this can be pushed down into a - * security_barrier view, since the planner must always generate - * a TID scan when CURRENT OF is present -- c.f. cost_tidscan.
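The clauses.c hunks above all follow the expression_tree_walker convention: a walker returns true to abort the traversal the moment a hazard is found, and false to keep descending. A self-contained toy version of that convention (Node here is an invented two-child tree, not the real Node):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct Node
{
	bool		is_restricted;	/* stand-in for a parallel hazard */
	struct Node *left;			/* stand-ins for child expressions */
	struct Node *right;
} Node;

static bool
hazard_walker(const Node *node)
{
	if (node == NULL)
		return false;
	if (node->is_restricted)
		return true;			/* abort search: hazard found */
	return hazard_walker(node->left) ||	/* recurse into children */
		hazard_walker(node->right);
}

int
main(void)
{
	Node		leaf = {true, NULL, NULL};
	Node		root = {false, &leaf, NULL};

	printf("hazard found: %d\n", hazard_walker(&root));	/* 1 */
	return 0;
}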
+ * security_barrier view, since the planner must always generate a + * TID scan when CURRENT OF is present -- c.f. cost_tidscan. */ return false; diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index de849808c31..6aa81921809 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -709,7 +709,7 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel, AttrNumber natt; Oid inferopfamily = InvalidOid; /* OID of opclass opfamily */ Oid inferopcinputtype = InvalidOid; /* OID of opclass input type */ - int nplain = 0; /* # plain attrs observed */ + int nplain = 0; /* # plain attrs observed */ /* * If inference specification element lacks collation/opclass, then no diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 2def06dd922..91cd2b506fb 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -107,7 +107,7 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptKind reloptkind) rel->consider_startup = (root->tuple_fraction > 0); rel->consider_param_startup = false; /* might get changed later */ rel->consider_parallel = false; /* might get changed later */ - rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */ + rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */ rel->reltarget = create_empty_pathtarget(); rel->reltarget_has_non_vars = false; rel->pathlist = NIL; diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c index 465cb9e8512..339a5b3f250 100644 --- a/src/backend/optimizer/util/tlist.c +++ b/src/backend/optimizer/util/tlist.c @@ -776,11 +776,11 @@ apply_pathtarget_labeling_to_tlist(List *tlist, PathTarget *target) void apply_partialaggref_adjustment(PathTarget *target) { - ListCell *lc; + ListCell *lc; foreach(lc, target->exprs) { - Aggref *aggref = (Aggref *) lfirst(lc); + Aggref *aggref = (Aggref *) lfirst(lc); if (IsA(aggref, Aggref)) { diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c index 81332b57d93..1e3ecbc51ef 100644 --- a/src/backend/parser/parse_relation.c +++ b/src/backend/parser/parse_relation.c @@ -3083,8 +3083,8 @@ errorMissingColumn(ParseState *pstate, errmsg("column %s.%s does not exist", relname, colname) : errmsg("column \"%s\" does not exist", colname), state->rfirst ? closestfirst ? 
- errhint("Perhaps you meant to reference the column \"%s.%s\".", - state->rfirst->eref->aliasname, closestfirst) : + errhint("Perhaps you meant to reference the column \"%s.%s\".", + state->rfirst->eref->aliasname, closestfirst) : errhint("There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query.", colname, state->rfirst->eref->aliasname) : 0, parser_errposition(pstate, location))); diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 65284941ed9..6313087174d 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -124,7 +124,7 @@ static void transformFKConstraints(CreateStmtContext *cxt, bool skipValidation, bool isAddConstraint); static void transformCheckConstraints(CreateStmtContext *cxt, - bool skipValidation); + bool skipValidation); static void transformConstraintAttrs(CreateStmtContext *cxt, List *constraintList); static void transformColumnType(CreateStmtContext *cxt, ColumnDef *column); @@ -287,15 +287,14 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString) if (like_found) { /* - * To match INHERITS, the existence of any LIKE table with OIDs - * causes the new table to have oids. For the same reason, - * WITH/WITHOUT OIDs is also ignored with LIKE. We prepend - * because the first oid option list entry is honored. Our - * prepended WITHOUT OIDS clause will be overridden if an - * inherited table has oids. + * To match INHERITS, the existence of any LIKE table with OIDs causes + * the new table to have oids. For the same reason, WITH/WITHOUT OIDs + * is also ignored with LIKE. We prepend because the first oid option + * list entry is honored. Our prepended WITHOUT OIDS clause will be + * overridden if an inherited table has oids. */ stmt->options = lcons(makeDefElem("oids", - (Node *)makeInteger(cxt.hasoids)), stmt->options); + (Node *) makeInteger(cxt.hasoids)), stmt->options); } foreach(elements, stmt->tableElts) @@ -305,6 +304,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString) if (nodeTag(element) == T_Constraint) transformTableConstraint(&cxt, (Constraint *) element); } + /* * transformIndexConstraints wants cxt.alist to contain only index * statements, so transfer anything we already have into save_alist. @@ -1949,8 +1949,8 @@ transformCheckConstraints(CreateStmtContext *cxt, bool skipValidation) /* * If creating a new table, we can safely skip validation of check - * constraints, and nonetheless mark them valid. (This will override - * any user-supplied NOT VALID flag.) + * constraints, and nonetheless mark them valid. (This will override any + * user-supplied NOT VALID flag.) */ if (skipValidation) { diff --git a/src/backend/port/atomics.c b/src/backend/port/atomics.c index 4972c3031f0..42169a33cf5 100644 --- a/src/backend/port/atomics.c +++ b/src/backend/port/atomics.c @@ -35,8 +35,7 @@ pg_spinlock_barrier(void) * * We use kill(0) for the fallback barrier as we assume that kernels on * systems old enough to require fallback barrier support will include an - * appropriate barrier while checking the existence of the postmaster - * pid. + * appropriate barrier while checking the existence of the postmaster pid. 
*/ (void) kill(PostmasterPid, 0); } diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 6bdaac50e0e..2c7446b4a43 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -204,7 +204,7 @@ typedef struct autovac_table * wi_links entry into free list or running list * wi_dboid OID of the database this worker is supposed to work on * wi_tableoid OID of the table currently being vacuumed, if any - * wi_sharedrel flag indicating whether table is marked relisshared + * wi_sharedrel flag indicating whether table is marked relisshared * wi_proc pointer to PGPROC of the running worker, NULL if not started * wi_launchtime Time at which this worker was launched * wi_cost_* Vacuum cost-based delay parameters current in this worker @@ -672,9 +672,9 @@ AutoVacLauncherMain(int argc, char *argv[]) /* * There are some conditions that we need to check before trying to - * start a worker. First, we need to make sure that there is a - * worker slot available. Second, we need to make sure that no - * other worker failed while starting up. + * start a worker. First, we need to make sure that there is a worker + * slot available. Second, we need to make sure that no other worker + * failed while starting up. */ current_time = GetCurrentTimestamp(); diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index d655fbcd835..8fa9edbf729 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -2727,6 +2727,7 @@ pgstat_bestart(void) beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0'; beentry->st_progress_command = PROGRESS_COMMAND_INVALID; beentry->st_progress_command_target = InvalidOid; + /* * we don't zero st_progress_param here to save cycles; nobody should * examine it until st_progress_command has been set to something other @@ -2909,7 +2910,7 @@ pgstat_progress_update_multi_param(int nparam, const int *index, const int64 *val) { volatile PgBackendStatus *beentry = MyBEEntry; - int i; + int i; if (!beentry || !pgstat_track_activities || nparam == 0) return; diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 6cf51e1b64d..6421c8601bc 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -1182,23 +1182,22 @@ PostmasterMain(int argc, char *argv[]) RemovePgTempFiles(); /* - * Forcibly remove the files signaling a standby promotion - * request. Otherwise, the existence of those files triggers - * a promotion too early, whether a user wants that or not. + * Forcibly remove the files signaling a standby promotion request. + * Otherwise, the existence of those files triggers a promotion too early, + * whether a user wants that or not. * - * This removal of files is usually unnecessary because they - * can exist only during a few moments during a standby - * promotion. However there is a race condition: if pg_ctl promote - * is executed and creates the files during a promotion, - * the files can stay around even after the server is brought up - * to new master. Then, if new standby starts by using the backup - * taken from that master, the files can exist at the server + * This removal of files is usually unnecessary because they can exist + * only during a few moments during a standby promotion. However there is + * a race condition: if pg_ctl promote is executed and creates the files + * during a promotion, the files can stay around even after the server is + * brought up to new master. 
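The atomics.c hunk above relies on kill(pid, 0), which delivers no signal but forces the kernel to validate the pid; on the old platforms targeted by the fallback, that check is assumed to imply a barrier. A minimal demonstration of the call pattern (getpid() stands in for PostmasterPid):

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int
main(void)
{
	pid_t		pid = getpid();	/* stand-in for PostmasterPid */

	(void) kill(pid, 0);		/* existence check; no signal is sent */
	puts("pid probed; fallback barrier semantics assumed");
	return 0;
}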
Then, if new standby starts by using the + * backup taken from that master, the files can exist at the server * startup and should be removed in order to avoid an unexpected * promotion. * - * Note that promotion signal files need to be removed before - * the startup process is invoked. Because, after that, they can - * be used by postmaster's SIGUSR1 signal handler. + * Note that promotion signal files need to be removed before the startup + * process is invoked. Because, after that, they can be used by + * postmaster's SIGUSR1 signal handler. */ RemovePromoteSignalFiles(); @@ -2053,9 +2052,9 @@ retry1: else if (!parse_bool(valptr, &am_walsender)) ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid value for parameter \"%s\": \"%s\"", - "replication", - valptr), + errmsg("invalid value for parameter \"%s\": \"%s\"", + "replication", + valptr), errhint("Valid values are: \"false\", 0, \"true\", 1, \"database\"."))); } else @@ -2607,6 +2606,7 @@ pmdie(SIGNAL_ARGS) if (pmState == PM_RECOVERY) { SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER); + /* * Only startup, bgwriter, walreceiver, possibly bgworkers, * and/or checkpointer should be active in this state; we just @@ -3074,9 +3074,9 @@ CleanupBackgroundWorker(int pid, /* * It's possible that this background worker started some OTHER - * background worker and asked to be notified when that worker - * started or stopped. If so, cancel any notifications destined - * for the now-dead backend. + * background worker and asked to be notified when that worker started + * or stopped. If so, cancel any notifications destined for the + * now-dead backend. */ if (rw->rw_backend->bgworker_notify) BackgroundWorkerStopNotifications(rw->rw_pid); @@ -5696,9 +5696,8 @@ maybe_start_bgworker(void) rw->rw_crashed_at = 0; /* - * Allocate and assign the Backend element. Note we - * must do this before forking, so that we can handle out of - * memory properly. + * Allocate and assign the Backend element. Note we must do this + * before forking, so that we can handle out of memory properly. 
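To make the promotion-file discussion above concrete: RemovePromoteSignalFiles boils down to unlinking the signal files under $PGDATA before the startup process can see them. A hedged sketch follows; the file names match what pg_ctl promote creates in this era, but treat them as illustrative, and note the real code runs with $PGDATA as its working directory:

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	(void) unlink("promote");			/* pg_ctl promote's signal file */
	(void) unlink("fallback_promote");	/* older fallback variant */

	puts("promotion signal files removed, if any existed");
	return 0;
}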
*/ if (!assign_backendlist_entry(rw)) return; diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index 100887337c0..da9b7a6f0de 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c @@ -117,8 +117,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir) TimeLineID starttli; XLogRecPtr endptr; TimeLineID endtli; - StringInfo labelfile; - StringInfo tblspc_map_file = NULL; + StringInfo labelfile; + StringInfo tblspc_map_file = NULL; int datadirpathlen; List *tablespaces = NIL; diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index 0c248f07e8f..46cd5ba1f2d 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -330,7 +330,7 @@ DecodeStandbyOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) case XLOG_INVALIDATIONS: { xl_invalidations *invalidations = - (xl_invalidations *) XLogRecGetData(r); + (xl_invalidations *) XLogRecGetData(r); ReorderBufferImmediateInvalidation( ctx->reorder, invalidations->nmsgs, invalidations->msgs); @@ -488,12 +488,12 @@ FilterByOrigin(LogicalDecodingContext *ctx, RepOriginId origin_id) static void DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) { - SnapBuild *builder = ctx->snapshot_builder; + SnapBuild *builder = ctx->snapshot_builder; XLogReaderState *r = buf->record; - TransactionId xid = XLogRecGetXid(r); - uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK; - RepOriginId origin_id = XLogRecGetOrigin(r); - Snapshot snapshot; + TransactionId xid = XLogRecGetXid(r); + uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK; + RepOriginId origin_id = XLogRecGetOrigin(r); + Snapshot snapshot; xl_logical_message *message; if (info != XLOG_LOGICAL_MESSAGE) @@ -522,7 +522,8 @@ DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf) snapshot = SnapBuildGetOrBuildSnapshot(builder, xid); ReorderBufferQueueMessage(ctx->reorder, xid, snapshot, buf->endptr, message->transactional, - message->message, /* first part of message is prefix */ + message->message, /* first part of message is + * prefix */ message->message_size, message->message + message->prefix_size); } @@ -536,8 +537,8 @@ DecodeCommit(LogicalDecodingContext *ctx, XLogRecordBuffer *buf, xl_xact_parsed_commit *parsed, TransactionId xid) { XLogRecPtr origin_lsn = InvalidXLogRecPtr; - TimestampTz commit_time = parsed->xact_time; - RepOriginId origin_id = XLogRecGetOrigin(buf->record); + TimestampTz commit_time = parsed->xact_time; + RepOriginId origin_id = XLogRecGetOrigin(buf->record); int i; if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN) diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index 5ccfd3105f0..7c8a777b339 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -63,8 +63,8 @@ static void commit_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, Relation relation, ReorderBufferChange *change); static void message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn, - XLogRecPtr message_lsn, bool transactional, - const char *prefix, Size message_size, const char *message); + XLogRecPtr message_lsn, bool transactional, + const char *prefix, Size message_size, const char *message); static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, char *plugin); diff --git a/src/backend/replication/logical/message.c 
b/src/backend/replication/logical/message.c index efcc25ae957..8f9dc2f47c6 100644 --- a/src/backend/replication/logical/message.c +++ b/src/backend/replication/logical/message.c @@ -51,7 +51,7 @@ XLogRecPtr LogLogicalMessage(const char *prefix, const char *message, size_t size, bool transactional) { - xl_logical_message xlrec; + xl_logical_message xlrec; /* * Force xid to be allocated if we're emitting a transactional message. @@ -87,7 +87,7 @@ logicalmsg_redo(XLogReaderState *record) uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK; if (info != XLOG_LOGICAL_MESSAGE) - elog(PANIC, "logicalmsg_redo: unknown op code %u", info); + elog(PANIC, "logicalmsg_redo: unknown op code %u", info); /* This is only interesting for logical decoding, see decode.c. */ } diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index 9aeb2d85977..cc2b5132366 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -148,7 +148,7 @@ typedef struct ReplicationStateCtl } ReplicationStateCtl; /* external variables */ -RepOriginId replorigin_session_origin = InvalidRepOriginId; /* assumed identity */ +RepOriginId replorigin_session_origin = InvalidRepOriginId; /* assumed identity */ XLogRecPtr replorigin_session_origin_lsn = InvalidXLogRecPtr; TimestampTz replorigin_session_origin_timestamp = 0; diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 52b0d41fa69..00e31a2d740 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -666,8 +666,8 @@ ReorderBufferQueueMessage(ReorderBuffer *rb, TransactionId xid, } else { - ReorderBufferTXN *txn = NULL; - volatile Snapshot snapshot_now = snapshot; + ReorderBufferTXN *txn = NULL; + volatile Snapshot snapshot_now = snapshot; if (xid != InvalidTransactionId) txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true); @@ -1836,10 +1836,10 @@ ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations, BeginInternalSubTransaction("replay"); /* - * Force invalidations to happen outside of a valid transaction - that - * way entries will just be marked as invalid without accessing the - * catalog. That's advantageous because we don't need to set up the - * full state necessary for catalog access. + * Force invalidations to happen outside of a valid transaction - that way + * entries will just be marked as invalid without accessing the catalog. + * That's advantageous because we don't need to set up the full state + * necessary for catalog access.
*/ if (use_subtxn) AbortCurrentTransaction(); @@ -2543,14 +2543,14 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn, change->data.msg.prefix = MemoryContextAlloc(rb->context, prefix_size); memcpy(change->data.msg.prefix, data, prefix_size); - Assert(change->data.msg.prefix[prefix_size-1] == '\0'); + Assert(change->data.msg.prefix[prefix_size - 1] == '\0'); data += prefix_size; /* read the message */ memcpy(&change->data.msg.message_size, data, sizeof(Size)); data += sizeof(Size); change->data.msg.message = MemoryContextAlloc(rb->context, - change->data.msg.message_size); + change->data.msg.message_size); memcpy(change->data.msg.message, data, change->data.msg.message_size); data += change->data.msg.message_size; diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 644c52ea417..2fb7c17d7da 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -230,11 +230,11 @@ ReplicationSlotCreate(const char *name, bool db_specific, ReplicationSlotValidateName(name, ERROR); /* - * If some other backend ran this code concurrently with us, we'd likely both - * allocate the same slot, and that would be bad. We'd also be at risk of - * missing a name collision. Also, we don't want to try to create a new - * slot while somebody's busy cleaning up an old one, because we might - * both be monkeying with the same directory. + * If some other backend ran this code concurrently with us, we'd likely + * both allocate the same slot, and that would be bad. We'd also be at + * risk of missing a name collision. Also, we don't want to try to create + * a new slot while somebody's busy cleaning up an old one, because we + * might both be monkeying with the same directory. */ LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE); @@ -352,8 +352,8 @@ ReplicationSlotAcquire(const char *name) if (active_pid != 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_IN_USE), - errmsg("replication slot \"%s\" is active for PID %d", - name, active_pid))); + errmsg("replication slot \"%s\" is active for PID %d", + name, active_pid))); /* We made this slot active, so it's ours now.
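The ReorderBufferRestoreChange hunk above reads back a logical message as a NUL-terminated prefix of prefix_size bytes, a Size field, then message_size payload bytes. A standalone round-trip sketch of that layout (plain buffers instead of MemoryContextAlloc, names invented):

#include <assert.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char	prefix[] = "test_plugin";	/* includes the NUL */
	const char	payload[] = "hello";
	size_t		prefix_size = sizeof(prefix);
	size_t		message_size = sizeof(payload);
	char		buf[64];
	char	   *p = buf;

	/* serialize: [prefix bytes][message_size][payload bytes] */
	memcpy(p, prefix, prefix_size);
	p += prefix_size;
	memcpy(p, &message_size, sizeof(size_t));
	p += sizeof(size_t);
	memcpy(p, payload, message_size);

	/* deserialize, mirroring ReorderBufferRestoreChange */
	const char *data = buf;
	size_t		sz;

	assert(data[prefix_size - 1] == '\0');	/* prefix must be NUL-terminated */
	printf("prefix=%s\n", data);
	data += prefix_size;
	memcpy(&sz, data, sizeof(size_t));
	data += sizeof(size_t);
	printf("message=%.*s\n", (int) sz, data);
	return 0;
}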
*/ MyReplicationSlot = slot; @@ -533,6 +533,7 @@ void ReplicationSlotMarkDirty(void) { ReplicationSlot *slot = MyReplicationSlot; + Assert(MyReplicationSlot != NULL); SpinLockAcquire(&slot->mutex); diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c index 9cc24eadf23..f9087619d2b 100644 --- a/src/backend/replication/slotfuncs.c +++ b/src/backend/replication/slotfuncs.c @@ -40,7 +40,7 @@ Datum pg_create_physical_replication_slot(PG_FUNCTION_ARGS) { Name name = PG_GETARG_NAME(0); - bool immediately_reserve = PG_GETARG_BOOL(1); + bool immediately_reserve = PG_GETARG_BOOL(1); Datum values[2]; bool nulls[2]; TupleDesc tupdesc; diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index 959ca78a1ef..67249d80c8b 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -86,9 +86,9 @@ static void SyncRepCancelWait(void); static int SyncRepWakeQueue(bool all, int mode); static bool SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, - XLogRecPtr *flushPtr, - XLogRecPtr *applyPtr, - bool *am_sync); + XLogRecPtr *flushPtr, + XLogRecPtr *applyPtr, + bool *am_sync); static int SyncRepGetStandbyPriority(void); #ifdef USE_ASSERT_CHECKING @@ -212,8 +212,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit) /* * If a wait for synchronous replication is pending, we can neither * acknowledge the commit nor raise ERROR or FATAL. The latter would - * lead the client to believe that the transaction aborted, which - * is not true: it's already committed locally. The former is no good + * lead the client to believe that the transaction aborted, which is + * not true: it's already committed locally. The former is no good * either: the client has requested synchronous replication, and is * entitled to assume that an acknowledged commit is also replicated, * which might not be true. So in this case we issue a WARNING (which @@ -400,8 +400,8 @@ SyncRepReleaseWaiters(void) /* * If this WALSender is serving a standby that is not on the list of * potential sync standbys then we have nothing to do. If we are still - * starting up, still running base backup or the current flush position - * is still invalid, then leave quickly also. + * starting up, still running base backup or the current flush position is + * still invalid, then leave quickly also. */ if (MyWalSnd->sync_standby_priority == 0 || MyWalSnd->state < WALSNDSTATE_STREAMING || @@ -412,21 +412,21 @@ SyncRepReleaseWaiters(void) } /* - * We're a potential sync standby. Release waiters if there are - * enough sync standbys and we are considered as sync. + * We're a potential sync standby. Release waiters if there are enough + * sync standbys and we are considered as sync. */ LWLockAcquire(SyncRepLock, LW_EXCLUSIVE); /* - * Check whether we are a sync standby or not, and calculate - * the oldest positions among all sync standbys. + * Check whether we are a sync standby or not, and calculate the oldest + * positions among all sync standbys. */ got_oldest = SyncRepGetOldestSyncRecPtr(&writePtr, &flushPtr, &applyPtr, &am_sync); /* - * If we are managing a sync standby, though we weren't - * prior to this, then announce we are now a sync standby. + * If we are managing a sync standby, though we weren't prior to this, + * then announce we are now a sync standby. 
*/ if (announce_next_takeover && am_sync) { @@ -489,8 +489,8 @@ static bool SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, XLogRecPtr *applyPtr, bool *am_sync) { - List *sync_standbys; - ListCell *cell; + List *sync_standbys; + ListCell *cell; *writePtr = InvalidXLogRecPtr; *flushPtr = InvalidXLogRecPtr; @@ -513,12 +513,12 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, } /* - * Scan through all sync standbys and calculate the oldest - * Write, Flush and Apply positions. + * Scan through all sync standbys and calculate the oldest Write, Flush + * and Apply positions. */ - foreach (cell, sync_standbys) + foreach(cell, sync_standbys) { - WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)]; + WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)]; XLogRecPtr write; XLogRecPtr flush; XLogRecPtr apply; @@ -554,16 +554,16 @@ SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr, List * SyncRepGetSyncStandbys(bool *am_sync) { - List *result = NIL; - List *pending = NIL; - int lowest_priority; - int next_highest_priority; - int this_priority; - int priority; - int i; - bool am_in_pending = false; - volatile WalSnd *walsnd; /* Use volatile pointer to prevent - * code rearrangement */ + List *result = NIL; + List *pending = NIL; + int lowest_priority; + int next_highest_priority; + int this_priority; + int priority; + int i; + bool am_in_pending = false; + volatile WalSnd *walsnd; /* Use volatile pointer to prevent code + * rearrangement */ /* Set default result */ if (am_sync != NULL) @@ -577,9 +577,9 @@ SyncRepGetSyncStandbys(bool *am_sync) next_highest_priority = lowest_priority + 1; /* - * Find the sync standbys which have the highest priority (i.e, 1). - * Also store all the other potential sync standbys into the pending list, - * in order to scan it later and find other sync standbys from it quickly. + * Find the sync standbys which have the highest priority (i.e, 1). Also + * store all the other potential sync standbys into the pending list, in + * order to scan it later and find other sync standbys from it quickly. */ for (i = 0; i < max_wal_senders; i++) { @@ -603,9 +603,9 @@ SyncRepGetSyncStandbys(bool *am_sync) continue; /* - * If the priority is equal to 1, consider this standby as sync - * and append it to the result. Otherwise append this standby - * to the pending list to check if it's actually sync or not later. + * If the priority is equal to 1, consider this standby as sync and + * append it to the result. Otherwise append this standby to the + * pending list to check if it's actually sync or not later. */ if (this_priority == 1) { @@ -615,7 +615,7 @@ SyncRepGetSyncStandbys(bool *am_sync) if (list_length(result) == SyncRepConfig->num_sync) { list_free(pending); - return result; /* Exit if got enough sync standbys */ + return result; /* Exit if got enough sync standbys */ } } else @@ -626,10 +626,10 @@ SyncRepGetSyncStandbys(bool *am_sync) /* * Track the highest priority among the standbys in the pending - * list, in order to use it as the starting priority for later scan - * of the list. This is useful to find quickly the sync standbys - * from the pending list later because we can skip unnecessary - * scans for the unused priorities. + * list, in order to use it as the starting priority for later + * scan of the list. This is useful to find quickly the sync + * standbys from the pending list later because we can skip + * unnecessary scans for the unused priorities. 
*/ if (this_priority < next_highest_priority) next_highest_priority = this_priority; @@ -663,9 +663,9 @@ SyncRepGetSyncStandbys(bool *am_sync) priority = next_highest_priority; while (priority <= lowest_priority) { - ListCell *cell; - ListCell *prev = NULL; - ListCell *next; + ListCell *cell; + ListCell *prev = NULL; + ListCell *next; next_highest_priority = lowest_priority + 1; @@ -685,8 +685,8 @@ SyncRepGetSyncStandbys(bool *am_sync) /* * We should always exit here after the scan of pending list - * starts because we know that the list has enough elements - * to reach SyncRepConfig->num_sync. + * starts because we know that the list has enough elements to + * reach SyncRepConfig->num_sync. */ if (list_length(result) == SyncRepConfig->num_sync) { @@ -695,8 +695,8 @@ SyncRepGetSyncStandbys(bool *am_sync) } /* - * Remove the entry for this sync standby from the list - * to prevent us from looking at the same entry again. + * Remove the entry for this sync standby from the list to + * prevent us from looking at the same entry again. */ pending = list_delete_cell(pending, cell, prev); diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index 6fd5952be71..ce311cb8972 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -463,7 +463,7 @@ WalReceiverMain(void) */ Assert(wait_fd != PGINVALID_SOCKET); rc = WaitLatchOrSocket(&walrcv->latch, - WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | + WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT | WL_LATCH_SET, wait_fd, NAPTIME_PER_CYCLE); @@ -475,8 +475,8 @@ WalReceiverMain(void) /* * The recovery process has asked us to send apply * feedback now. Make sure the flag is really set to - * false in shared memory before sending the reply, - * so we don't miss a new request for a reply. + * false in shared memory before sending the reply, so + * we don't miss a new request for a reply. */ walrcv->force_reply = false; pg_memory_barrier(); @@ -1318,10 +1318,10 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS) TimeLineID receive_start_tli; XLogRecPtr received_lsn; TimeLineID received_tli; - TimestampTz last_send_time; - TimestampTz last_receipt_time; + TimestampTz last_send_time; + TimestampTz last_receipt_time; XLogRecPtr latest_end_lsn; - TimestampTz latest_end_time; + TimestampTz latest_end_time; char *slotname; /* No WAL receiver, just return a tuple with NULL values */ @@ -1379,8 +1379,8 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS) if (!superuser()) { /* - * Only superusers can see details. Other users only get the pid - * value to know whether it is a WAL receiver, but no details. + * Only superusers can see details. Other users only get the pid value + * to know whether it is a WAL receiver, but no details. 
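The SyncRepGetSyncStandbys hunks above implement a quota-by-priority rule: priority-1 standbys are taken first, and lower priorities are consulted only if the quota is not yet filled. A compressed sketch of that rule; the real code does it in one pass plus a pending list, so this double loop is a simplification:

#include <stdio.h>

int
main(void)
{
	int			priority[] = {2, 1, 0, 3, 1};	/* 0 means async */
	int			nwalsnd = 5;
	int			num_sync = 3;	/* quota from synchronous_standby_names */
	int			chosen = 0;
	int			max_prio = 0;
	int			prio, i;

	for (i = 0; i < nwalsnd; i++)
		if (priority[i] > max_prio)
			max_prio = priority[i];

	/* scan priority 1 first, then 2, ... until the quota is filled */
	for (prio = 1; prio <= max_prio && chosen < num_sync; prio++)
		for (i = 0; i < nwalsnd && chosen < num_sync; i++)
			if (priority[i] == prio)
			{
				printf("walsender %d is sync (priority %d)\n", i, prio);
				chosen++;
			}
	return 0;
}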
*/ MemSet(&nulls[1], true, PG_STAT_GET_WAL_RECEIVER_COLS - 1); } @@ -1422,5 +1422,5 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS) /* Returns the record as Datum */ PG_RETURN_DATUM(HeapTupleGetDatum( - heap_form_tuple(tupdesc, values, nulls))); + heap_form_tuple(tupdesc, values, nulls))); } diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index 5cd4d51865d..a0dba194a61 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -464,7 +464,7 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd) pq_beginmessage(&buf, 'D'); pq_sendint(&buf, 2, 2); /* # of columns */ len = strlen(histfname); - pq_sendint(&buf, len, 4); /* col1 len */ + pq_sendint(&buf, len, 4); /* col1 len */ pq_sendbytes(&buf, histfname, len); fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0666); @@ -657,7 +657,7 @@ StartReplication(StartReplicationCmd *cmd) /* Initialize shared memory status, too */ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->sentPtr = sentPtr; @@ -728,7 +728,7 @@ StartReplication(StartReplicationCmd *cmd) pq_sendint(&buf, 2, 2); /* number of columns */ len = strlen(tli_str); - pq_sendint(&buf, len, 4); /* length */ + pq_sendint(&buf, len, 4); /* length */ pq_sendbytes(&buf, tli_str, len); len = strlen(startpos_str); @@ -901,7 +901,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd) /* slot_name */ len = strlen(NameStr(MyReplicationSlot->data.name)); - pq_sendint(&buf, len, 4); /* col1 len */ + pq_sendint(&buf, len, 4); /* col1 len */ pq_sendbytes(&buf, NameStr(MyReplicationSlot->data.name), len); /* consistent wal location */ @@ -1008,7 +1008,7 @@ StartLogicalReplication(StartReplicationCmd *cmd) /* Also update the sent position status in shared memory */ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->sentPtr = MyReplicationSlot->data.restart_lsn; @@ -1569,7 +1569,7 @@ ProcessStandbyReplyMessage(void) * standby. 
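The walsender hunks above frame replies by hand: a 16-bit column count, then a 32-bit length and raw bytes per column, in network byte order. A toy encoder for that shape, using a plain buffer in place of StringInfo/pq_sendint and an invented column value:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char		buf[128];
	size_t		off = 0;
	const char *col1 = "00000002.history";	/* e.g. a history file name */
	uint16_t	ncols = htons(2);			/* # of columns */
	uint32_t	len = htonl((uint32_t) strlen(col1));	/* col1 len */

	memcpy(buf + off, &ncols, sizeof(ncols));
	off += sizeof(ncols);
	memcpy(buf + off, &len, sizeof(len));
	off += sizeof(len);
	memcpy(buf + off, col1, strlen(col1));
	off += strlen(col1);

	printf("encoded %zu bytes of DataRow-style payload\n", off);
	return 0;
}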
*/ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->write = writePtr; @@ -1948,7 +1948,7 @@ InitWalSenderSlot(void) */ for (i = 0; i < max_wal_senders; i++) { - WalSnd *walsnd = &WalSndCtl->walsnds[i]; + WalSnd *walsnd = &WalSndCtl->walsnds[i]; SpinLockAcquire(&walsnd->mutex); @@ -2161,7 +2161,7 @@ retry: */ if (am_cascading_walsender) { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; bool reload; SpinLockAcquire(&walsnd->mutex); @@ -2399,7 +2399,7 @@ XLogSendPhysical(void) /* Update shared memory status */ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->sentPtr = sentPtr; @@ -2461,7 +2461,7 @@ XLogSendLogical(void) /* Update shared memory status */ { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; SpinLockAcquire(&walsnd->mutex); walsnd->sentPtr = sentPtr; @@ -2556,7 +2556,7 @@ WalSndRqstFileReload(void) for (i = 0; i < max_wal_senders; i++) { - WalSnd *walsnd = &WalSndCtl->walsnds[i]; + WalSnd *walsnd = &WalSndCtl->walsnds[i]; if (walsnd->pid == 0) continue; @@ -2708,7 +2708,7 @@ WalSndWakeup(void) void WalSndSetState(WalSndState state) { - WalSnd *walsnd = MyWalSnd; + WalSnd *walsnd = MyWalSnd; Assert(am_walsender); @@ -2792,7 +2792,7 @@ pg_stat_get_wal_senders(PG_FUNCTION_ARGS) for (i = 0; i < max_wal_senders; i++) { - WalSnd *walsnd = &WalSndCtl->walsnds[i]; + WalSnd *walsnd = &WalSndCtl->walsnds[i]; XLogRecPtr sentPtr; XLogRecPtr write; XLogRecPtr flush; diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c index 96f3f376675..f82d891c347 100644 --- a/src/backend/rewrite/rewriteDefine.c +++ b/src/backend/rewrite/rewriteDefine.c @@ -414,8 +414,8 @@ DefineQueryRewrite(char *rulename, * any triggers, indexes, child tables, policies, or RLS enabled. * (Note: these tests are too strict, because they will reject * relations that once had such but don't anymore. But we don't - * really care, because this whole business of converting relations - * to views is just a kluge to allow dump/reload of views that + * really care, because this whole business of converting relations to + * views is just a kluge to allow dump/reload of views that * participate in circular dependencies.) 
*/ if (event_relation->rd_rel->relkind != RELKIND_VIEW && diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c index 970fa33843f..e02911656a3 100644 --- a/src/backend/rewrite/rowsecurity.c +++ b/src/backend/rewrite/rowsecurity.c @@ -64,21 +64,21 @@ static void get_policies_for_relation(Relation relation, static List *sort_policies_by_name(List *policies); -static int row_security_policy_cmp(const void *a, const void *b); +static int row_security_policy_cmp(const void *a, const void *b); static void add_security_quals(int rt_index, - List *permissive_policies, - List *restrictive_policies, - List **securityQuals, - bool *hasSubLinks); + List *permissive_policies, + List *restrictive_policies, + List **securityQuals, + bool *hasSubLinks); static void add_with_check_options(Relation rel, - int rt_index, - WCOKind kind, - List *permissive_policies, - List *restrictive_policies, - List **withCheckOptions, - bool *hasSubLinks); + int rt_index, + WCOKind kind, + List *permissive_policies, + List *restrictive_policies, + List **withCheckOptions, + bool *hasSubLinks); static bool check_role_for_policy(ArrayType *policy_roles, Oid user_id); @@ -163,29 +163,31 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, rel = heap_open(rte->relid, NoLock); commandType = rt_index == root->resultRelation ? - root->commandType : CMD_SELECT; + root->commandType : CMD_SELECT; /* * In some cases, we need to apply USING policies (which control the * visibility of records) associated with multiple command types (see * specific cases below). * - * When considering the order in which to apply these USING policies, - * we prefer to apply higher privileged policies, those which allow the - * user to lock records (UPDATE and DELETE), first, followed by policies - * which don't (SELECT). + * When considering the order in which to apply these USING policies, we + * prefer to apply higher privileged policies, those which allow the user + * to lock records (UPDATE and DELETE), first, followed by policies which + * don't (SELECT). * * Note that the optimizer is free to push down and reorder quals which * use leakproof functions. * * In all cases, if there are no policy clauses allowing access to rows in - * the table for the specific type of operation, then a single always-false - * clause (a default-deny policy) will be added (see add_security_quals). + * the table for the specific type of operation, then a single + * always-false clause (a default-deny policy) will be added (see + * add_security_quals). */ /* * For a SELECT, if UPDATE privileges are required (eg: the user has - * specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals first. + * specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals + * first. * * This way, we filter out any records from the SELECT FOR SHARE/UPDATE * which the user does not have access to via the UPDATE USING policies, @@ -232,8 +234,8 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, * a WHERE clause which involves columns from the relation), we collect up * CMD_SELECT policies and add them via add_security_quals first. * - * This way, we filter out any records which are not visible through an ALL - * or SELECT USING policy. + * This way, we filter out any records which are not visible through an + * ALL or SELECT USING policy. 
*/ if ((commandType == CMD_UPDATE || commandType == CMD_DELETE) && rte->requiredPerms & ACL_SELECT) @@ -272,9 +274,9 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, hasSubLinks); /* - * Get and add ALL/SELECT policies, if SELECT rights are required - * for this relation (eg: when RETURNING is used). These are added as - * WCO policies rather than security quals to ensure that an error is + * Get and add ALL/SELECT policies, if SELECT rights are required for + * this relation (eg: when RETURNING is used). These are added as WCO + * policies rather than security quals to ensure that an error is * raised if a policy is violated; otherwise, we might end up silently * dropping rows to be added. */ @@ -288,7 +290,7 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, &select_restrictive_policies); add_with_check_options(rel, rt_index, commandType == CMD_INSERT ? - WCO_RLS_INSERT_CHECK : WCO_RLS_UPDATE_CHECK, + WCO_RLS_INSERT_CHECK : WCO_RLS_UPDATE_CHECK, select_permissive_policies, select_restrictive_policies, withCheckOptions, @@ -324,11 +326,11 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, hasSubLinks); /* - * Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK - * WCOs to ensure they are considered when taking the UPDATE - * path of an INSERT .. ON CONFLICT DO UPDATE, if SELECT - * rights are required for this relation, also as WCO policies, - * again, to avoid silently dropping data. See above. + * Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK WCOs + * to ensure they are considered when taking the UPDATE path of an + * INSERT .. ON CONFLICT DO UPDATE, if SELECT rights are required + * for this relation, also as WCO policies, again, to avoid + * silently dropping data. See above. */ if (rte->requiredPerms & ACL_SELECT) { @@ -336,7 +338,7 @@ get_row_security_policies(Query *root, RangeTblEntry *rte, int rt_index, List *conflict_select_restrictive_policies = NIL; get_policies_for_relation(rel, CMD_SELECT, user_id, - &conflict_select_permissive_policies, + &conflict_select_permissive_policies, &conflict_select_restrictive_policies); add_with_check_options(rel, rt_index, WCO_RLS_CONFLICT_CHECK, @@ -392,8 +394,8 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, */ foreach(item, relation->rd_rsdesc->policies) { - bool cmd_matches = false; - RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item); + bool cmd_matches = false; + RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item); /* Always add ALL policies, if they exist. */ if (policy->polcmd == '*') @@ -427,8 +429,8 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, } /* - * Add this policy to the list of permissive policies if it - * applies to the specified role. + * Add this policy to the list of permissive policies if it applies to + * the specified role. 
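The policy assembly above has a simple truth-table shape: permissive USING quals are OR'ed together (any one grants visibility), restrictive quals are AND'ed on top (each can only remove rows), and no permissive policy at all means default deny. A hedged boolean sketch of that combination rule:

#include <stdbool.h>
#include <stdio.h>

static bool
row_visible(const bool *permissive, int nperm,
			const bool *restrictive, int nrestr)
{
	bool		ok = false;
	int			i;

	if (nperm == 0)
		return false;			/* default deny: no permissive policy */
	for (i = 0; i < nperm; i++)
		ok = ok || permissive[i];		/* OR of permissive quals */
	for (i = 0; i < nrestr; i++)
		ok = ok && restrictive[i];		/* AND of restrictive quals */
	return ok;
}

int
main(void)
{
	bool		perm[] = {false, true};	/* second policy grants access */
	bool		restr[] = {true};		/* restrictive policy passes too */

	printf("visible: %d\n", row_visible(perm, 2, restr, 1));	/* 1 */
	return 0;
}

This mirrors the default-deny comment in add_security_quals: with an empty permissive list the function short-circuits to false before any restrictive qual is consulted.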
*/ if (cmd_matches && check_role_for_policy(policy->roles, user_id)) *permissive_policies = lappend(*permissive_policies, policy); @@ -442,7 +444,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_restrictive) { List *hook_policies = - (*row_security_policy_hook_restrictive) (cmd, relation); + (*row_security_policy_hook_restrictive) (cmd, relation); /* * We sort restrictive policies by name so that any WCOs they generate @@ -462,7 +464,7 @@ get_policies_for_relation(Relation relation, CmdType cmd, Oid user_id, if (row_security_policy_hook_permissive) { List *hook_policies = - (*row_security_policy_hook_permissive) (cmd, relation); + (*row_security_policy_hook_permissive) (cmd, relation); foreach(item, hook_policies) { @@ -498,6 +500,7 @@ sort_policies_by_name(List *policies) foreach(item, policies) { RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item); + pols[ii++] = *policy; } @@ -551,8 +554,8 @@ add_security_quals(int rt_index, Expr *rowsec_expr; /* - * First collect up the permissive quals. If we do not find any permissive - * policies then no rows are visible (this is handled below). + * First collect up the permissive quals. If we do not find any + * permissive policies then no rows are visible (this is handled below). */ foreach(item, permissive_policies) { @@ -577,8 +580,8 @@ add_security_quals(int rt_index, /* * We now know that permissive policies exist, so we can now add * security quals based on the USING clauses from the restrictive - * policies. Since these need to be "AND"d together, we can - * just add them one at a time. + * policies. Since these need to be "AND"d together, we can just add + * them one at a time. */ foreach(item, restrictive_policies) { @@ -608,6 +611,7 @@ add_security_quals(int rt_index, *securityQuals = list_append_unique(*securityQuals, rowsec_expr); } else + /* * A permissive policy must exist for rows to be visible at all. * Therefore, if there were no permissive policies found, return a @@ -647,7 +651,7 @@ add_with_check_options(Relation rel, List *permissive_quals = NIL; #define QUAL_FOR_WCO(policy) \ - ( kind != WCO_RLS_CONFLICT_CHECK && \ + ( kind != WCO_RLS_CONFLICT_CHECK && \ (policy)->with_check_qual != NULL ? \ (policy)->with_check_qual : (policy)->qual ) @@ -668,11 +672,11 @@ add_with_check_options(Relation rel, } /* - * There must be at least one permissive qual found or no rows are - * allowed to be added. This is the same as in add_security_quals. + * There must be at least one permissive qual found or no rows are allowed + * to be added. This is the same as in add_security_quals. * - * If there are no permissive_quals then we fall through and return a single - * 'false' WCO, preventing all new rows. + * If there are no permissive_quals then we fall through and return a + * single 'false' WCO, preventing all new rows. */ if (permissive_quals != NIL) { diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c index 5804870ad48..a4163cf717d 100644 --- a/src/backend/storage/buffer/buf_init.c +++ b/src/backend/storage/buffer/buf_init.c @@ -187,11 +187,12 @@ BufferShmemSize(void) /* * It would be nice to include the I/O locks in the BufferDesc, but that - * would increase the size of a BufferDesc to more than one cache line, and - * benchmarking has shown that keeping every BufferDesc aligned on a cache - * line boundary is important for performance. So, instead, the array of - * I/O locks is allocated in a separate tranche. 
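For context on the comment just above: the I/O locks are kept out of the BufferDesc array and placed in a minimally padded array with their own tranche. A rough sketch of that layout, assuming the 9.6-era shared-memory and tranche primitives (ShmemInitStruct, LWLockInitialize, LWTRANCHE_BUFFER_IO_IN_PROGRESS); the real code lives in buf_init.c:

    LWLockMinimallyPadded *io_locks;
    bool        found;
    int         i;

    /* one minimally padded lock per buffer, outside the BufferDesc array */
    io_locks = (LWLockMinimallyPadded *)
        ShmemInitStruct("Buffer IO Locks",
                        mul_size(NBuffers, sizeof(LWLockMinimallyPadded)),
                        &found);
    for (i = 0; i < NBuffers; i++)
        LWLockInitialize(&io_locks[i].lock, LWTRANCHE_BUFFER_IO_IN_PROGRESS);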
Because those locks are - not highly contended, we lay out the array with minimal padding. + would increase the size of a BufferDesc to more than one cache line, + and benchmarking has shown that keeping every BufferDesc aligned on a + cache line boundary is important for performance. So, instead, the + array of I/O locks is allocated in a separate tranche. Because those + locks are not highly contended, we lay out the array with minimal + padding. */ size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded))); /* to allow aligning the above */ diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 8a830d4f21d..59a8a85dfcd 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -4291,8 +4291,8 @@ void TestForOldSnapshot_impl(Snapshot snapshot, Relation relation) { if (!IsCatalogRelation(relation) - && !RelationIsAccessibleInLogicalDecoding(relation) - && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp()) + && !RelationIsAccessibleInLogicalDecoding(relation) + && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp()) ereport(ERROR, (errcode(ERRCODE_SNAPSHOT_TOO_OLD), errmsg("snapshot too old"))); diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c index 2ffa8ff24d0..bbd90c911aa 100644 --- a/src/backend/storage/freespace/freespace.c +++ b/src/backend/storage/freespace/freespace.c @@ -199,13 +199,13 @@ RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail) */ void UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum, - BlockNumber endBlkNum, Size freespace) + BlockNumber endBlkNum, Size freespace) { int new_cat = fsm_space_avail_to_cat(freespace); FSMAddress addr; uint16 slot; - BlockNumber blockNum; - BlockNumber lastBlkOnPage; + BlockNumber blockNum; + BlockNumber lastBlkOnPage; blockNum = startBlkNum; @@ -219,9 +219,9 @@ UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum, fsm_update_recursive(rel, addr, new_cat); /* - * Get the last block number on this FSM page. If that's greater - * than or equal to our endBlkNum, we're done. Otherwise, advance - * to the first block on the next page. + * Get the last block number on this FSM page. If that's greater than + * or equal to our endBlkNum, we're done. Otherwise, advance to the + * first block on the next page. */ lastBlkOnPage = fsm_get_lastblckno(rel, addr); if (lastBlkOnPage >= endBlkNum) @@ -841,8 +841,8 @@ fsm_get_lastblckno(Relation rel, FSMAddress addr) int slot; /* - * Get the last slot number on the given address and convert that to - * block number + * Get the last slot number on the given address and convert that to block + * number */ slot = SlotsPerFSMPage - 1; return fsm_get_heap_blk(addr, slot); @@ -862,8 +862,8 @@ fsm_update_recursive(Relation rel, FSMAddress addr, uint8 new_cat) return; /* - * Get the parent page and our slot in the parent page, and - * update the information in that. + * Get the parent page and our slot in the parent page, and update the + * information in that.
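Reassembled from the fragments above, the page-at-a-time walk in UpdateFreeSpaceMap looks roughly like this (fsm_get_location, fsm_update_recursive and fsm_get_lastblckno are file-local helpers in freespace.c):

    BlockNumber blockNum = startBlkNum;

    while (blockNum <= endBlkNum)
    {
        FSMAddress  addr;
        uint16      slot;
        BlockNumber lastBlkOnPage;

        addr = fsm_get_location(blockNum, &slot);
        fsm_update_recursive(rel, addr, new_cat);

        /* done once this FSM page covers endBlkNum; else hop to next page */
        lastBlkOnPage = fsm_get_lastblckno(rel, addr);
        if (lastBlkOnPage >= endBlkNum)
            break;
        blockNum = lastBlkOnPage + 1;
    }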
*/ parent = fsm_get_parent(addr, &parentslot); fsm_set_and_search(rel, parent, parentslot, new_cat, 0); diff --git a/src/backend/storage/ipc/dsm.c b/src/backend/storage/ipc/dsm.c index cd13a6284c1..47f2bea0be3 100644 --- a/src/backend/storage/ipc/dsm.c +++ b/src/backend/storage/ipc/dsm.c @@ -245,8 +245,8 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle) } /* - * OK, the control segment looks basically valid, so we can use it to - * get a list of segments that need to be removed. + * OK, the control segment looks basically valid, so we can use it to get + * a list of segments that need to be removed. */ nitems = old_control->nitems; for (i = 0; i < nitems; ++i) diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index e9de51bdfa0..e5d487dbb74 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -460,7 +460,7 @@ ProcArrayEndTransactionInternal(PGPROC *proc, PGXACT *pgxact, pgxact->xmin = InvalidTransactionId; /* must be cleared with xid/xmin: */ pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK; - pgxact->delayChkpt = false; /* be sure this is cleared in abort */ + pgxact->delayChkpt = false; /* be sure this is cleared in abort */ proc->recoveryConflictPending = false; /* Clear the subtransaction-XID cache too while holding the lock */ @@ -559,8 +559,8 @@ ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid) /* Walk the list and clear all XIDs. */ while (nextidx != INVALID_PGPROCNO) { - PGPROC *proc = &allProcs[nextidx]; - PGXACT *pgxact = &allPgXact[nextidx]; + PGPROC *proc = &allProcs[nextidx]; + PGXACT *pgxact = &allPgXact[nextidx]; ProcArrayEndTransactionInternal(proc, pgxact, proc->procArrayGroupMemberXid); @@ -580,7 +580,7 @@ ProcArrayGroupClearXid(PGPROC *proc, TransactionId latestXid) */ while (wakeidx != INVALID_PGPROCNO) { - PGPROC *proc = &allProcs[wakeidx]; + PGPROC *proc = &allProcs[wakeidx]; wakeidx = pg_atomic_read_u32(&proc->procArrayGroupNext); pg_atomic_write_u32(&proc->procArrayGroupNext, INVALID_PGPROCNO); @@ -642,8 +642,8 @@ ProcArrayInitRecovery(TransactionId initializedUptoXID) Assert(TransactionIdIsNormal(initializedUptoXID)); /* - * we set latestObservedXid to the xid SUBTRANS has been initialized up to, - * so we can extend it from that point onwards in + * we set latestObservedXid to the xid SUBTRANS has been initialized up + * to, so we can extend it from that point onwards in * RecordKnownAssignedTransactionIds, and when we get consistent in * ProcArrayApplyRecoveryInfo(). */ @@ -2591,8 +2591,8 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid) /* * We ignore an invalid pxmin because this means that backend has * no snapshot currently. We hold a Share lock to avoid contention - * with users taking snapshots. That is not a problem because - * the current xmin is always at least one higher than the latest + * with users taking snapshots. That is not a problem because the + * current xmin is always at least one higher than the latest * removed xid, so any new snapshot would never conflict with the * test here. 
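The two loops above are the leader's side of the group XID clearing protocol. The enlistment side, sketched here under the assumption of the 9.6 PGPROC/PROC_HDR fields, pushes the backend onto a lock-free list head with compare-and-swap before it sleeps waiting for the leader:

    uint32      nextidx;

    proc->procArrayGroupMember = true;
    proc->procArrayGroupMemberXid = latestXid;
    while (true)
    {
        nextidx = pg_atomic_read_u32(&procglobal->procArrayGroupFirst);
        pg_atomic_write_u32(&proc->procArrayGroupNext, nextidx);

        /* CAS ourselves in as the new head; retry if another backend won */
        if (pg_atomic_compare_exchange_u32(&procglobal->procArrayGroupFirst,
                                           &nextidx,
                                           (uint32) proc->pgprocno))
            break;
    }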
*/ diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c index 03ca79b5e36..5f6226c9bb9 100644 --- a/src/backend/storage/ipc/shm_mq.c +++ b/src/backend/storage/ipc/shm_mq.c @@ -1007,8 +1007,8 @@ shm_mq_receive_bytes(shm_mq *mq, Size bytes_needed, bool nowait, static bool shm_mq_counterparty_gone(volatile shm_mq *mq, BackgroundWorkerHandle *handle) { - bool detached; - pid_t pid; + bool detached; + pid_t pid; /* Acquire the lock just long enough to check the pointer. */ SpinLockAcquire(&mq->mq_mutex); diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index 762dfa65eb9..7a512b3f81d 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -370,6 +370,7 @@ ResolveRecoveryConflictWithLock(LOCKTAG locktag) * We're already behind, so clear a path as quickly as possible. */ VirtualTransactionId *backends; + backends = GetLockConflicts(&locktag, AccessExclusiveLock); ResolveRecoveryConflictWithVirtualXIDs(backends, PROCSIG_RECOVERY_CONFLICT_LOCK); diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index 41f69306459..dba3809e740 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -1153,13 +1153,13 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, uint32 partition = LockHashPartition(hashcode); /* - * It might seem unsafe to access proclock->groupLeader without a lock, - * but it's not really. Either we are initializing a proclock on our - * own behalf, in which case our group leader isn't changing because - * the group leader for a process can only ever be changed by the - * process itself; or else we are transferring a fast-path lock to the - * main lock table, in which case that process can't change its lock - * group leader without first releasing all of its locks (and in + * It might seem unsafe to access proclock->groupLeader without a + * lock, but it's not really. Either we are initializing a proclock + * on our own behalf, in which case our group leader isn't changing + * because the group leader for a process can only ever be changed by + * the process itself; or else we are transferring a fast-path lock to + * the main lock table, in which case that process can't change its + * lock group leader without first releasing all of its locks (and in * particular the one we are currently transferring). */ proclock->groupLeader = proc->lockGroupLeader != NULL ? @@ -1319,10 +1319,9 @@ LockCheckConflicts(LockMethod lockMethodTable, } /* - * Rats. Something conflicts. But it could still be my own lock, or - * a lock held by another member of my locking group. First, figure out - * how many conflicts remain after subtracting out any locks I hold - * myself. + * Rats. Something conflicts. But it could still be my own lock, or a + * lock held by another member of my locking group. First, figure out how + * many conflicts remain after subtracting out any locks I hold myself. */ myLocks = proclock->holdMask; for (i = 1; i <= numLockModes; i++) @@ -1357,9 +1356,10 @@ LockCheckConflicts(LockMethod lockMethodTable, /* * Locks held in conflicting modes by members of our own lock group are * not real conflicts; we can subtract those out and see if we still have - * a conflict.
This is O(N) in the number of processes holding or + * awaiting locks on this object. We could improve that by making the + * shared memory state more complex (and larger) but it doesn't seem worth + * it. */ procLocks = &(lock->procLocks); otherproclock = (PROCLOCK *) @@ -1370,7 +1370,7 @@ LockCheckConflicts(LockMethod lockMethodTable, proclock->groupLeader == otherproclock->groupLeader && (otherproclock->holdMask & conflictMask) != 0) { - int intersectMask = otherproclock->holdMask & conflictMask; + int intersectMask = otherproclock->holdMask & conflictMask; for (i = 1; i <= numLockModes; i++) { @@ -2583,8 +2583,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag * * proc->databaseId is set at backend startup time and never changes * thereafter, so it might be safe to perform this test before - * acquiring &proc->backendLock. In particular, it's certainly safe to - * assume that if the target backend holds any fast-path locks, it + * acquiring &proc->backendLock. In particular, it's certainly safe + * to assume that if the target backend holds any fast-path locks, it * must have performed a memory-fencing operation (in particular, an * LWLock acquisition) since setting proc->databaseId. However, it's * less clear that our backend is certain to have performed a memory diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 25eec9800de..7ffa87d914b 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -208,25 +208,25 @@ PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode) (errhidestmt(true), errhidecontext(true), errmsg_internal("%d: %s(%s): excl %u shared %u haswaiters %u waiters %u rOK %d", - MyProcPid, - where, MainLWLockNames[id], - (state & LW_VAL_EXCLUSIVE) != 0, - state & LW_SHARED_MASK, - (state & LW_FLAG_HAS_WAITERS) != 0, - pg_atomic_read_u32(&lock->nwaiters), - (state & LW_FLAG_RELEASE_OK) != 0))); + MyProcPid, + where, MainLWLockNames[id], + (state & LW_VAL_EXCLUSIVE) != 0, + state & LW_SHARED_MASK, + (state & LW_FLAG_HAS_WAITERS) != 0, + pg_atomic_read_u32(&lock->nwaiters), + (state & LW_FLAG_RELEASE_OK) != 0))); else ereport(LOG, (errhidestmt(true), errhidecontext(true), errmsg_internal("%d: %s(%s %d): excl %u shared %u haswaiters %u waiters %u rOK %d", - MyProcPid, - where, T_NAME(lock), id, - (state & LW_VAL_EXCLUSIVE) != 0, - state & LW_SHARED_MASK, - (state & LW_FLAG_HAS_WAITERS) != 0, - pg_atomic_read_u32(&lock->nwaiters), - (state & LW_FLAG_RELEASE_OK) != 0))); + MyProcPid, + where, T_NAME(lock), id, + (state & LW_VAL_EXCLUSIVE) != 0, + state & LW_SHARED_MASK, + (state & LW_FLAG_HAS_WAITERS) != 0, + pg_atomic_read_u32(&lock->nwaiters), + (state & LW_FLAG_RELEASE_OK) != 0))); } } @@ -243,13 +243,13 @@ LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg) (errhidestmt(true), errhidecontext(true), errmsg_internal("%s(%s): %s", where, - MainLWLockNames[id], msg))); + MainLWLockNames[id], msg))); else ereport(LOG, (errhidestmt(true), errhidecontext(true), errmsg_internal("%s(%s %d): %s", where, - T_NAME(lock), id, msg))); + T_NAME(lock), id, msg))); } } @@ -760,8 +760,8 @@ GetLWLockIdentifier(uint8 classId, uint16 eventId) /* * It is quite possible that user has registered tranche in one of the - * backends (e.g. by allocating lwlocks in dynamic shared memory) but - * not all of them, so we can't assume the tranche is registered here. + * backends (e.g. 
by allocating lwlocks in dynamic shared memory) but not + all of them, so we can't assume the tranche is registered here. */ if (eventId >= LWLockTranchesAllocated || LWLockTrancheArray[eventId]->name == NULL) diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index a66e07b7665..9a758bd9160 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -288,7 +288,7 @@ InitProcGlobal(void) void InitProcess(void) { - PGPROC * volatile * procgloballist; + PGPROC *volatile * procgloballist; /* * ProcGlobal should be set up already (if we are a backend, we inherit @@ -342,8 +342,8 @@ InitProcess(void) MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno]; /* - * Cross-check that the PGPROC is of the type we expect; if this were - * not the case, it would get returned to the wrong list. + * Cross-check that the PGPROC is of the type we expect; if this were not + * the case, it would get returned to the wrong list. */ Assert(MyProc->procgloballist == procgloballist); @@ -781,7 +781,7 @@ static void ProcKill(int code, Datum arg) { PGPROC *proc; - PGPROC * volatile * procgloballist; + PGPROC *volatile * procgloballist; Assert(MyProc != NULL); diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c index 8b46ea5bf91..c43c206c030 100644 --- a/src/backend/tsearch/spell.c +++ b/src/backend/tsearch/spell.c @@ -25,23 +25,23 @@ * * A compiled dictionary is stored in the IspellDict structure. Compilation of * a dictionary is divided into the several steps: - * - NIImportDictionary() - stores each word of a .dict file in the - * temporary Spell field. - * - NIImportAffixes() - stores affix rules of an .affix file in the - * Affix field (not temporary) if an .affix file has the Ispell format. - * -> NIImportOOAffixes() - stores affix rules if an .affix file has the - * Hunspell format. The AffixData field is initialized if AF parameter - * is defined. - * - NISortDictionary() - builds a prefix tree (Trie) from the words list - * and stores it in the Dictionary field. The words list is got from the - * Spell field. The AffixData field is initialized if AF parameter is not - * defined. - * - NISortAffixes(): - * - builds a list of compound affixes from the affix list and stores it - * in the CompoundAffix. - * - builds prefix trees (Trie) from the affix list for prefixes and suffixes - * and stores them in Suffix and Prefix fields. - * The affix list is got from the Affix field. + * - NIImportDictionary() - stores each word of a .dict file in the + * temporary Spell field. + * - NIImportAffixes() - stores affix rules of an .affix file in the + * Affix field (not temporary) if an .affix file has the Ispell format. + * -> NIImportOOAffixes() - stores affix rules if an .affix file has the + * Hunspell format. The AffixData field is initialized if AF parameter + * is defined. + * - NISortDictionary() - builds a prefix tree (Trie) from the words list + * and stores it in the Dictionary field. The words list is got from the + * Spell field. The AffixData field is initialized if AF parameter is not + * defined. + * - NISortAffixes(): + * - builds a list of compound affixes from the affix list and stores it + * in the CompoundAffix. + * - builds prefix trees (Trie) from the affix list for prefixes and suffixes + * and stores them in Suffix and Prefix fields. + * The affix list is got from the Affix field.
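The (re-wrapped) step list above corresponds to the call sequence a dictionary template runs at load time. A minimal sketch in the order the comment gives; build_ispell_dict is a hypothetical wrapper, and the entry points are the ones declared in tsearch/dicts/spell.h:

    static void
    build_ispell_dict(IspellDict *Conf, const char *dictfile, const char *afffile)
    {
        NIStartBuild(Conf);
        NIImportDictionary(Conf, dictfile); /* fill the temporary Spell list */
        NIImportAffixes(Conf, afffile);     /* affix rules; detects Hunspell format */
        NISortDictionary(Conf);             /* build the word Trie (Dictionary) */
        NISortAffixes(Conf);                /* Suffix/Prefix tries, CompoundAffix */
        NIFinishBuild(Conf);                /* release temporary build storage */
    }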
* * Memory management * ----------------- @@ -204,14 +204,14 @@ static int cmpspellaffix(const void *s1, const void *s2) { return (strcmp((*(SPELL *const *) s1)->p.flag, - (*(SPELL *const *) s2)->p.flag)); + (*(SPELL *const *) s2)->p.flag)); } static int cmpcmdflag(const void *f1, const void *f2) { - CompoundAffixFlag *fv1 = (CompoundAffixFlag *) f1, - *fv2 = (CompoundAffixFlag *) f2; + CompoundAffixFlag *fv1 = (CompoundAffixFlag *) f1, + *fv2 = (CompoundAffixFlag *) f2; Assert(fv1->flagMode == fv2->flagMode); @@ -335,15 +335,15 @@ cmpaffix(const void *s1, const void *s2) * * Depending on the flagMode an affix string can have the following format: * - FM_CHAR: ABCD - * Here we have 4 flags: A, B, C and D + * Here we have 4 flags: A, B, C and D * - FM_LONG: ABCDE* - * Here we have 3 flags: AB, CD and E* + * Here we have 3 flags: AB, CD and E* * - FM_NUM: 200,205,50 - * Here we have 3 flags: 200, 205 and 50 + * Here we have 3 flags: 200, 205 and 50 * * Conf: current dictionary. * sflagset: the set of affix flags. Returns a reference to the start of a next - * affix flag. + * affix flag. * sflag: returns an affix flag from sflagset. */ static void @@ -358,7 +358,7 @@ getNextFlagFromString(IspellDict *Conf, char **sflagset, char *sflag) maxstep = (Conf->flagMode == FM_LONG) ? 2 : 1; - while(**sflagset) + while (**sflagset) { switch (Conf->flagMode) { @@ -413,8 +413,8 @@ getNextFlagFromString(IspellDict *Conf, char **sflagset, char *sflag) { ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("invalid character in affix flag \"%s\"", - *sflagset))); + errmsg("invalid character in affix flag \"%s\"", + *sflagset))); } *sflagset += pg_mblen(*sflagset); @@ -432,8 +432,8 @@ getNextFlagFromString(IspellDict *Conf, char **sflagset, char *sflag) if (Conf->flagMode == FM_LONG && maxstep > 0) ereport(ERROR, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("invalid affix flag \"%s\" with long flag value", sbuf))); + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("invalid affix flag \"%s\" with long flag value", sbuf))); *sflag = '\0'; } @@ -527,6 +527,7 @@ NIImportDictionary(IspellDict *Conf, const char *filename) { char *s, *pstr; + /* Set of affix flags */ const char *flag; @@ -581,11 +582,11 @@ NIImportDictionary(IspellDict *Conf, const char *filename) * meter/GMD * * The affix rule with the flag S: - * SFX S y ies [^aeiou]y + * SFX S y ies [^aeiou]y * is not presented here. * * The affix rule with the flag M: - * SFX M 0 's . + * SFX M 0 's . * is presented here. * * Conf: current dictionary. @@ -620,9 +621,9 @@ FindWord(IspellDict *Conf, const char *word, char *affixflag, int flag) if (flag == 0) { /* - * The word can be formed only with another word. - * And in the flag parameter there is not a sign - * that we search compound words. + * The word can be formed only with another word. And + * in the flag parameter there is not a sign that we + * search compound words. */ if (StopMiddle->compoundflag & FF_COMPOUNDONLY) return 0; @@ -658,21 +659,21 @@ FindWord(IspellDict *Conf, const char *word, char *affixflag, int flag) * Conf: current dictionary. * flag: affix flag ('\' in the below example). * flagflags: set of flags from the flagval field for this affix rule. This set - * is listed after '/' character in the added string (repl). + * is listed after '/' character in the added string (repl). 
* - * For example L flag in the hunspell_sample.affix: - * SFX \ 0 Y/L [^Y] + * For example L flag in the hunspell_sample.affix: + * SFX \ 0 Y/L [^Y] * * mask: condition for search ('[^Y]' in the above example). * find: stripping characters from beginning (at prefix) or end (at suffix) - * of the word ('0' in the above example, 0 means that there is not - * stripping character). + * of the word ('0' in the above example, 0 means that there is not + * stripping character). * repl: adding string after stripping ('Y' in the above example). * type: FF_SUFFIX or FF_PREFIX. */ static void -NIAddAffix(IspellDict *Conf, const char* flag, char flagflags, const char *mask, - const char *find, const char *repl, int type) +NIAddAffix(IspellDict *Conf, const char *flag, char flagflags, const char *mask, + const char *find, const char *repl, int type) { AFFIX *Affix; @@ -1024,8 +1025,8 @@ setCompoundAffixFlagValue(IspellDict *Conf, CompoundAffixFlag *entry, { if (Conf->flagMode == FM_NUM) { - char *next; - int i; + char *next; + int i; i = strtol(s, &next, 10); if (s == next || errno == ERANGE) @@ -1056,10 +1057,10 @@ setCompoundAffixFlagValue(IspellDict *Conf, CompoundAffixFlag *entry, static void addCompoundAffixFlagValue(IspellDict *Conf, char *s, uint32 val) { - CompoundAffixFlag *newValue; - char sbuf[BUFSIZ]; - char *sflag; - int clen; + CompoundAffixFlag *newValue; + char sbuf[BUFSIZ]; + char *sflag; + int clen; while (*s && t_isspace(s)) s += pg_mblen(s); @@ -1088,7 +1089,7 @@ addCompoundAffixFlagValue(IspellDict *Conf, char *s, uint32 val) Conf->mCompoundAffixFlag *= 2; Conf->CompoundAffixFlags = (CompoundAffixFlag *) repalloc((void *) Conf->CompoundAffixFlags, - Conf->mCompoundAffixFlag * sizeof(CompoundAffixFlag)); + Conf->mCompoundAffixFlag * sizeof(CompoundAffixFlag)); } else { @@ -1113,11 +1114,11 @@ addCompoundAffixFlagValue(IspellDict *Conf, char *s, uint32 val) static int getCompoundAffixFlagValue(IspellDict *Conf, char *s) { - uint32 flag = 0; + uint32 flag = 0; CompoundAffixFlag *found, - key; - char sflag[BUFSIZ]; - char *flagcur; + key; + char sflag[BUFSIZ]; + char *flagcur; if (Conf->nCompoundAffixFlag == 0) return 0; @@ -1151,8 +1152,8 @@ getAffixFlagSet(IspellDict *Conf, char *s) { if (Conf->useFlagAliases && *s != '\0') { - int curaffix; - char *end; + int curaffix; + char *end; curaffix = strtol(s, &end, 10); if (s == end || errno == ERANGE) @@ -1161,9 +1162,10 @@ getAffixFlagSet(IspellDict *Conf, char *s) errmsg("invalid affix alias \"%s\"", s))); if (curaffix > 0 && curaffix <= Conf->nAffixData) + /* - * Do not subtract 1 from curaffix - * because empty string was added in NIImportOOAffixes + * Do not subtract 1 from curaffix because empty string was added + * in NIImportOOAffixes */ return Conf->AffixData[curaffix]; else @@ -1260,9 +1262,9 @@ NIImportOOAffixes(IspellDict *Conf, const char *filename) Conf->flagMode = FM_NUM; else if (STRNCMP(s, "default") != 0) ereport(ERROR, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("Ispell dictionary supports only default, " - "long and num flag value"))); + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("Ispell dictionary supports only default, " + "long and num flag value"))); } } @@ -1303,8 +1305,8 @@ NIImportOOAffixes(IspellDict *Conf, const char *filename) naffix = atoi(sflag); if (naffix == 0) ereport(ERROR, - (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("invalid number of flag vector aliases"))); + (errcode(ERRCODE_CONFIG_FILE_ERROR), + errmsg("invalid number of flag vector aliases"))); /* Also reserve place for empty flag set 
*/ naffix++; @@ -1596,7 +1598,8 @@ MergeAffix(IspellDict *Conf, int a1, int a2) static uint32 makeCompoundFlags(IspellDict *Conf, int affix) { - char *str = Conf->AffixData[affix]; + char *str = Conf->AffixData[affix]; + return (getCompoundAffixFlagValue(Conf, str) & FF_COMPOUNDFLAGMASK); } @@ -1700,14 +1703,14 @@ NISortDictionary(IspellDict *Conf) /* compress affixes */ /* - * If we use flag aliases then we need to use Conf->AffixData filled - * in the NIImportOOAffixes(). + * If we use flag aliases then we need to use Conf->AffixData filled in + * the NIImportOOAffixes(). */ if (Conf->useFlagAliases) { for (i = 0; i < Conf->nspell; i++) { - char *end; + char *end; if (*Conf->Spell[i]->p.flag != '\0') { @@ -1762,7 +1765,7 @@ NISortDictionary(IspellDict *Conf) curaffix++; Assert(curaffix < naffix); Conf->AffixData[curaffix] = cpstrdup(Conf, - Conf->Spell[i]->p.flag); + Conf->Spell[i]->p.flag); } Conf->Spell[i]->p.d.affix = curaffix; @@ -2219,8 +2222,8 @@ NormalizeSubWord(IspellDict *Conf, char *word, int flag) if (CheckAffix(newword, swrdlen, prefix->aff[j], flag, pnewword, &baselen)) { /* prefix success */ - char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ? - VoidString : prefix->aff[j]->flag; + char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ? + VoidString : prefix->aff[j]->flag; if (FindWord(Conf, pnewword, ff, flag)) cur += addToResult(forms, cur, pnewword); diff --git a/src/backend/tsearch/to_tsany.c b/src/backend/tsearch/to_tsany.c index d41f82c479a..80d80f2451a 100644 --- a/src/backend/tsearch/to_tsany.c +++ b/src/backend/tsearch/to_tsany.c @@ -20,8 +20,8 @@ typedef struct MorphOpaque { - Oid cfg_id; - int qoperator; /* query operator */ + Oid cfg_id; + int qoperator; /* query operator */ } MorphOpaque; @@ -274,14 +274,14 @@ to_tsvector(PG_FUNCTION_ARGS) static void pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, int16 weight, bool prefix) { - int32 count = 0; - ParsedText prs; - uint32 variant, - pos = 0, - cntvar = 0, - cntpos = 0, - cnt = 0; - MorphOpaque *data = (MorphOpaque *) DatumGetPointer(opaque); + int32 count = 0; + ParsedText prs; + uint32 variant, + pos = 0, + cntvar = 0, + cntpos = 0, + cnt = 0; + MorphOpaque *data = (MorphOpaque *) DatumGetPointer(opaque); prs.lenwords = 4; prs.curwords = 0; @@ -295,8 +295,8 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, while (count < prs.curwords) { /* - * Were any stop words removed? If so, fill empty positions - * with placeholders linked by an appropriate operator. + * Were any stop words removed? If so, fill empty positions with + * placeholders linked by an appropriate operator. 
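Concretely, the gap-filling described above pushes one placeholder per removed stop word, chained with the query operator at distance 1; a sketch assuming the tsquery parser primitives pushStop() and pushOperator():

    /* positions pos+1 .. prs.words[count].pos.pos-1 held stop words */
    while (pos + 1 < prs.words[count].pos.pos)
    {
        pushStop(state);                    /* placeholder lexeme */
        if (cntpos)
            pushOperator(state, data->qoperator, 1);    /* distance 1 */
        cntpos++;
        pos++;
    }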
*/ if (pos > 0 && pos + 1 < prs.words[count].pos.pos) { @@ -330,7 +330,7 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, prs.words[count].word, prs.words[count].len, weight, - ((prs.words[count].flags & TSL_PREFIX) || prefix)); + ((prs.words[count].flags & TSL_PREFIX) || prefix)); pfree(prs.words[count].word); if (cnt) pushOperator(state, OP_AND, 0); @@ -362,9 +362,9 @@ pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, Datum to_tsquery_byid(PG_FUNCTION_ARGS) { - text *in = PG_GETARG_TEXT_P(1); - TSQuery query; - MorphOpaque data; + text *in = PG_GETARG_TEXT_P(1); + TSQuery query; + MorphOpaque data; data.cfg_id = PG_GETARG_OID(0); data.qoperator = OP_AND; @@ -392,9 +392,9 @@ to_tsquery(PG_FUNCTION_ARGS) Datum plainto_tsquery_byid(PG_FUNCTION_ARGS) { - text *in = PG_GETARG_TEXT_P(1); - TSQuery query; - MorphOpaque data; + text *in = PG_GETARG_TEXT_P(1); + TSQuery query; + MorphOpaque data; data.cfg_id = PG_GETARG_OID(0); data.qoperator = OP_AND; @@ -423,9 +423,9 @@ plainto_tsquery(PG_FUNCTION_ARGS) Datum phraseto_tsquery_byid(PG_FUNCTION_ARGS) { - text *in = PG_GETARG_TEXT_P(1); - TSQuery query; - MorphOpaque data; + text *in = PG_GETARG_TEXT_P(1); + TSQuery query; + MorphOpaque data; data.cfg_id = PG_GETARG_OID(0); data.qoperator = OP_PHRASE; diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c index ca352af3315..ac1c4d23162 100644 --- a/src/backend/tsearch/wparser_def.c +++ b/src/backend/tsearch/wparser_def.c @@ -2033,7 +2033,7 @@ static bool checkcondition_HL(void *opaque, QueryOperand *val, ExecPhraseData *data) { int i; - hlCheck *checkval = (hlCheck *) opaque; + hlCheck *checkval = (hlCheck *) opaque; for (i = 0; i < checkval->len; i++) { diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c index d2b23d05cbd..fecf605541a 100644 --- a/src/backend/utils/adt/acl.c +++ b/src/backend/utils/adt/acl.c @@ -5277,12 +5277,12 @@ check_rolespec_name(const Node *node, const char *detail_msg) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role \"%s\" is reserved", - role->rolename), + role->rolename), errdetail("%s", detail_msg))); else ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("role \"%s\" is reserved", - role->rolename))); + role->rolename))); } } diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c index 803ba4752e9..c6c296b9609 100644 --- a/src/backend/utils/adt/datum.c +++ b/src/backend/utils/adt/datum.c @@ -257,7 +257,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen) Size datumEstimateSpace(Datum value, bool isnull, bool typByVal, int typLen) { - Size sz = sizeof(int); + Size sz = sizeof(int); if (!isnull) { @@ -267,6 +267,7 @@ datumEstimateSpace(Datum value, bool isnull, bool typByVal, int typLen) else if (VARATT_IS_EXTERNAL_EXPANDED(value)) { ExpandedObjectHeader *eoh = DatumGetEOHP(value); + sz += EOH_get_flat_size(eoh); } else @@ -298,7 +299,7 @@ datumSerialize(Datum value, bool isnull, bool typByVal, int typLen, char **start_address) { ExpandedObjectHeader *eoh = NULL; - int header; + int header; /* Write header word. */ if (isnull) @@ -346,8 +347,8 @@ datumSerialize(Datum value, bool isnull, bool typByVal, int typLen, Datum datumRestore(char **start_address, bool *isnull) { - int header; - void *d; + int header; + void *d; /* Read header word. 
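The datumSerialize()/datumRestore() pair above targets flat buffers, e.g. for shipping parameter values to parallel workers. A hypothetical round trip (variable names invented for illustration):

    Size        sz;
    char       *buf,
               *cursor;
    Datum       copy;
    bool        copy_isnull;

    sz = datumEstimateSpace(value, isnull, typByVal, typLen);
    buf = palloc(sz);

    cursor = buf;
    datumSerialize(value, isnull, typByVal, typLen, &cursor);   /* advances cursor */
    Assert(cursor == buf + sz);

    cursor = buf;
    copy = datumRestore(&cursor, &copy_isnull);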
*/ memcpy(&header, *start_address, sizeof(int)); diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index d622e98f30a..ae93fe01679 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -5074,9 +5074,9 @@ numeric_to_number(PG_FUNCTION_ARGS) { Numeric x; Numeric a = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(10))); + Int32GetDatum(10))); Numeric b = DatumGetNumeric(DirectFunctionCall1(int4_numeric, - Int32GetDatum(-Num.multi))); + Int32GetDatum(-Num.multi))); x = DatumGetNumeric(DirectFunctionCall2(numeric_power, NumericGetDatum(a), diff --git a/src/backend/utils/adt/geo_spgist.c b/src/backend/utils/adt/geo_spgist.c index e3945f20810..019015656fc 100644 --- a/src/backend/utils/adt/geo_spgist.c +++ b/src/backend/utils/adt/geo_spgist.c @@ -101,19 +101,19 @@ typedef struct { double low; double high; -} Range; +} Range; typedef struct { Range left; Range right; -} RangeBox; +} RangeBox; typedef struct { RangeBox range_box_x; RangeBox range_box_y; -} RectBox; +} RectBox; /* * Calculate the quadrant @@ -173,7 +173,7 @@ getRangeBox(BOX *box) static RectBox * initRectBox(void) { - RectBox *rect_box = (RectBox *) palloc(sizeof(RectBox)); + RectBox *rect_box = (RectBox *) palloc(sizeof(RectBox)); double infinity = get_float8_infinity(); rect_box->range_box_x.left.low = -infinity; @@ -201,7 +201,7 @@ initRectBox(void) static RectBox * nextRectBox(RectBox *rect_box, RangeBox *centroid, uint8 quadrant) { - RectBox *next_rect_box = (RectBox *) palloc(sizeof(RectBox)); + RectBox *next_rect_box = (RectBox *) palloc(sizeof(RectBox)); memcpy(next_rect_box, rect_box, sizeof(RectBox)); @@ -233,7 +233,7 @@ static bool overlap2D(RangeBox *range_box, Range *query) { return FPge(range_box->right.high, query->low) && - FPle(range_box->left.low, query->high); + FPle(range_box->left.low, query->high); } /* Can any rectangle from rect_box overlap with this argument? */ @@ -241,7 +241,7 @@ static bool overlap4D(RectBox *rect_box, RangeBox *query) { return overlap2D(&rect_box->range_box_x, &query->left) && - overlap2D(&rect_box->range_box_y, &query->right); + overlap2D(&rect_box->range_box_y, &query->right); } /* Can any range from range_box contain this argument? */ @@ -249,15 +249,15 @@ static bool contain2D(RangeBox *range_box, Range *query) { return FPge(range_box->right.high, query->high) && - FPle(range_box->left.low, query->low); + FPle(range_box->left.low, query->low); } /* Can any rectangle from rect_box contain this argument? */ static bool -contain4D(RectBox *rect_box, RangeBox * query) +contain4D(RectBox *rect_box, RangeBox *query) { return contain2D(&rect_box->range_box_x, &query->left) && - contain2D(&rect_box->range_box_y, &query->right); + contain2D(&rect_box->range_box_y, &query->right); } /* Can any range from range_box be contained by this argument? */ @@ -265,9 +265,9 @@ static bool contained2D(RangeBox *range_box, Range *query) { return FPle(range_box->left.low, query->high) && - FPge(range_box->left.high, query->low) && - FPle(range_box->right.low, query->high) && - FPge(range_box->right.high, query->low); + FPge(range_box->left.high, query->low) && + FPle(range_box->right.low, query->high) && + FPge(range_box->right.high, query->low); } /* Can any rectangle from rect_box be contained by this argument? 
*/ @@ -275,7 +275,7 @@ static bool contained4D(RectBox *rect_box, RangeBox *query) { return contained2D(&rect_box->range_box_x, &query->left) && - contained2D(&rect_box->range_box_y, &query->right); + contained2D(&rect_box->range_box_y, &query->right); } /* Can any range from range_box to be lower than this argument? */ @@ -283,7 +283,7 @@ static bool lower2D(RangeBox *range_box, Range *query) { return FPlt(range_box->left.low, query->low) && - FPlt(range_box->right.low, query->low); + FPlt(range_box->right.low, query->low); } /* Can any range from range_box to be higher than this argument? */ @@ -291,7 +291,7 @@ static bool higher2D(RangeBox *range_box, Range *query) { return FPgt(range_box->left.high, query->high) && - FPgt(range_box->right.high, query->high); + FPgt(range_box->right.high, query->high); } /* Can any rectangle from rect_box be left of this argument? */ @@ -396,8 +396,8 @@ spg_box_quad_choose(PG_FUNCTION_ARGS) Datum spg_box_quad_picksplit(PG_FUNCTION_ARGS) { - spgPickSplitIn *in = (spgPickSplitIn *) PG_GETARG_POINTER(0); - spgPickSplitOut *out = (spgPickSplitOut *) PG_GETARG_POINTER(1); + spgPickSplitIn *in = (spgPickSplitIn *) PG_GETARG_POINTER(0); + spgPickSplitOut *out = (spgPickSplitOut *) PG_GETARG_POINTER(1); BOX *centroid; int median, i; @@ -409,7 +409,7 @@ spg_box_quad_picksplit(PG_FUNCTION_ARGS) /* Calculate median of all 4D coordinates */ for (i = 0; i < in->nTuples; i++) { - BOX *box = DatumGetBoxP(in->datums[i]); + BOX *box = DatumGetBoxP(in->datums[i]); lowXs[i] = box->low.x; highXs[i] = box->high.x; @@ -442,13 +442,13 @@ spg_box_quad_picksplit(PG_FUNCTION_ARGS) out->leafTupleDatums = palloc(sizeof(Datum) * in->nTuples); /* - * Assign ranges to corresponding nodes according to quadrants - * relative to the "centroid" range + * Assign ranges to corresponding nodes according to quadrants relative to + * the "centroid" range */ for (i = 0; i < in->nTuples; i++) { - BOX *box = DatumGetBoxP(in->datums[i]); - uint8 quadrant = getQuadrant(centroid, box); + BOX *box = DatumGetBoxP(in->datums[i]); + uint8 quadrant = getQuadrant(centroid, box); out->leafTupleDatums[i] = BoxPGetDatum(box); out->mapTuplesToNodes[i] = quadrant; @@ -465,12 +465,12 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) { spgInnerConsistentIn *in = (spgInnerConsistentIn *) PG_GETARG_POINTER(0); spgInnerConsistentOut *out = (spgInnerConsistentOut *) PG_GETARG_POINTER(1); - int i; - MemoryContext old_ctx; - RectBox *rect_box; - uint8 quadrant; - RangeBox *centroid, - **queries; + int i; + MemoryContext old_ctx; + RectBox *rect_box; + uint8 quadrant; + RangeBox *centroid, + **queries; if (in->allTheSame) { @@ -484,8 +484,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) } /* - * We are saving the traversal value or initialize it an unbounded - * one, if we have just begun to walk the tree. + * We are saving the traversal value or initialize it an unbounded one, if + * we have just begun to walk the tree. */ if (in->traversalValue) rect_box = in->traversalValue; @@ -493,8 +493,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) rect_box = initRectBox(); /* - * We are casting the prefix and queries to RangeBoxes for ease of - * the following operations. + * We are casting the prefix and queries to RangeBoxes for ease of the + * following operations. 
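Every 2D/4D predicate above leans on one representation trick: a box is treated as a point in 4-D space, stored as two ranges, the x bounds (left) and the y bounds (right). A sketch of that conversion, consistent with the Range/RangeBox definitions earlier in this section and with the way the predicates pair range_box_x with query->left:

    static RangeBox *
    getRangeBox(BOX *box)
    {
        RangeBox   *range_box = (RangeBox *) palloc(sizeof(RangeBox));

        range_box->left.low = box->low.x;   /* range of x coordinates */
        range_box->left.high = box->high.x;
        range_box->right.low = box->low.y;  /* range of y coordinates */
        range_box->right.high = box->high.y;

        return range_box;
    }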
*/ centroid = getRangeBox(DatumGetBoxP(in->prefixDatum)); queries = (RangeBox **) palloc(in->nkeys * sizeof(RangeBox *)); @@ -507,15 +507,15 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes); /* - * We switch memory context, because we want to allocate memory for - * new traversal values (next_rect_box) and pass these pieces of - * memory to further call of this function. + * We switch memory context, because we want to allocate memory for new + * traversal values (next_rect_box) and pass these pieces of memory to + * further call of this function. */ old_ctx = MemoryContextSwitchTo(in->traversalMemoryContext); for (quadrant = 0; quadrant < in->nNodes; quadrant++) { - RectBox *next_rect_box = nextRectBox(rect_box, centroid, quadrant); + RectBox *next_rect_box = nextRectBox(rect_box, centroid, quadrant); bool flag = true; for (i = 0; i < in->nkeys; i++) @@ -587,8 +587,8 @@ spg_box_quad_inner_consistent(PG_FUNCTION_ARGS) else { /* - * If this node is not selected, we don't need to keep - * the next traversal value in the memory context. + * If this node is not selected, we don't need to keep the next + * traversal value in the memory context. */ pfree(next_rect_box); } diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c index e1ceea6da48..ddc34ceec7a 100644 --- a/src/backend/utils/adt/jsonb_util.c +++ b/src/backend/utils/adt/jsonb_util.c @@ -1305,7 +1305,7 @@ compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar) case jbvBool: if (aScalar->val.boolean == bScalar->val.boolean) return 0; - else if (aScalar->val.boolean >bScalar->val.boolean) + else if (aScalar->val.boolean > bScalar->val.boolean) return 1; else return -1; diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index fb149dcd477..cb14993542d 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -609,7 +609,7 @@ jsonb_array_element(PG_FUNCTION_ARGS) /* Handle negative subscript */ if (element < 0) { - uint32 nelements = JB_ROOT_COUNT(jb); + uint32 nelements = JB_ROOT_COUNT(jb); if (-element > nelements) PG_RETURN_NULL(); @@ -652,7 +652,7 @@ jsonb_array_element_text(PG_FUNCTION_ARGS) /* Handle negative subscript */ if (element < 0) { - uint32 nelements = JB_ROOT_COUNT(jb); + uint32 nelements = JB_ROOT_COUNT(jb); if (-element > nelements) PG_RETURN_NULL(); @@ -992,7 +992,7 @@ get_array_start(void *state) _state->path_indexes[lex_level] != INT_MIN) { /* Negative subscript -- convert to positive-wise subscript */ - int nelements = json_count_array_elements(_state->lex); + int nelements = json_count_array_elements(_state->lex); if (-_state->path_indexes[lex_level] <= nelements) _state->path_indexes[lex_level] += nelements; @@ -1002,8 +1002,8 @@ get_array_start(void *state) { /* * Special case: we should match the entire array. We only need this - * at the outermost level because at nested levels the match will - * have been started by the outer field or array element callback. + * at the outermost level because at nested levels the match will have + * been started by the outer field or array element callback. */ _state->result_start = _state->lex->token_start; } @@ -3368,9 +3368,9 @@ jsonb_concat(PG_FUNCTION_ARGS) *it2; /* - * If one of the jsonb is empty, just return the other if it's not - * scalar and both are of the same kind. 
If it's a scalar or they are - of different kinds we need to perform the concatenation even if one is + If one of the jsonb is empty, just return the other if it's not scalar + and both are of the same kind. If it's a scalar or they are of + different kinds we need to perform the concatenation even if one is * empty. */ if (JB_ROOT_IS_OBJECT(jb1) == JB_ROOT_IS_OBJECT(jb2)) @@ -3481,7 +3481,7 @@ jsonb_delete_idx(PG_FUNCTION_ARGS) it = JsonbIteratorInit(&in->root); r = JsonbIteratorNext(&it, &v, false); - Assert (r == WJB_BEGIN_ARRAY); + Assert(r == WJB_BEGIN_ARRAY); n = v.val.array.nElems; if (idx < 0) @@ -3868,8 +3868,8 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls, if (level == path_len - 1) { /* - * called from jsonb_insert(), it forbids redefining - * an existing value + * called from jsonb_insert(), it forbids redefining an + * existing value */ if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_INSERT_AFTER)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errhint("Try using the function jsonb_set " "to replace key value."))); - r = JsonbIteratorNext(it, &v, true); /* skip value */ + r = JsonbIteratorNext(it, &v, true); /* skip value */ if (!(op_type & JB_PATH_DELETE)) { (void) pushJsonbValue(st, WJB_KEY, &k); @@ -4005,8 +4005,8 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, /* * We should keep current value only in case of - * JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER - * because otherwise it should be deleted or replaced + * JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER because + * otherwise it should be deleted or replaced */ if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_INSERT_BEFORE)) (void) pushJsonbValue(st, r, &v); diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index 39f43863d62..10133f21fac 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -849,13 +849,13 @@ parse_ident(PG_FUNCTION_ARGS) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("string is not a valid identifier: \"%s\"", text_to_cstring(qualname)), - errdetail("No valid identifier before \".\"."))); + errdetail("No valid identifier before \".\"."))); else if (after_dot) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("string is not a valid identifier: \"%s\"", text_to_cstring(qualname)), - errdetail("No valid identifier after \".\"."))); + errdetail("No valid identifier after \".\"."))); else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 3d21e33a26b..6592ef4d2d9 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -3355,10 +3355,10 @@ numeric_accum(PG_FUNCTION_ARGS) Datum numeric_combine(PG_FUNCTION_ARGS) { - NumericAggState *state1; - NumericAggState *state2; - MemoryContext agg_context; - MemoryContext old_context; + NumericAggState *state1; + NumericAggState *state2; + MemoryContext agg_context; + MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -3397,8 +3397,8 @@ numeric_combine(PG_FUNCTION_ARGS) state1->NaNcount += state2->NaNcount; /* - * These are currently only needed for moving aggregates, but let's - * do the right thing anyway... + * These are currently only needed for moving aggregates, but let's do + * the right thing anyway...
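numeric_combine and the other combine functions in this file share one partial-aggregate shape: keep ours if theirs is empty, copy theirs into the aggregate context if ours is empty, otherwise fold the sums. Sketched with the file-local helpers (makeNumericAggState, set_var_from_var, add_var), eliding the maxScale bookkeeping shown above:

    if (state2 == NULL)
        PG_RETURN_POINTER(state1);

    if (state1 == NULL)
    {
        /* manufacture state1 in the aggregate context, copying state2 */
        old_context = MemoryContextSwitchTo(agg_context);
        state1 = makeNumericAggState(fcinfo, true);
        state1->N = state2->N;
        state1->NaNcount = state2->NaNcount;
        set_var_from_var(&state2->sumX, &state1->sumX);
        set_var_from_var(&state2->sumX2, &state1->sumX2);
        MemoryContextSwitchTo(old_context);
        PG_RETURN_POINTER(state1);
    }

    /* both sides non-empty: fold the partial sums together */
    state1->N += state2->N;
    state1->NaNcount += state2->NaNcount;
    add_var(&state1->sumX, &state2->sumX, &state1->sumX);
    add_var(&state1->sumX2, &state2->sumX2, &state1->sumX2);
    PG_RETURN_POINTER(state1);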
*/ if (state2->maxScale > state1->maxScale) { @@ -3446,10 +3446,10 @@ numeric_avg_accum(PG_FUNCTION_ARGS) Datum numeric_avg_combine(PG_FUNCTION_ARGS) { - NumericAggState *state1; - NumericAggState *state2; - MemoryContext agg_context; - MemoryContext old_context; + NumericAggState *state1; + NumericAggState *state2; + MemoryContext agg_context; + MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -3485,8 +3485,8 @@ numeric_avg_combine(PG_FUNCTION_ARGS) state1->NaNcount += state2->NaNcount; /* - * These are currently only needed for moving aggregates, but let's - * do the right thing anyway... + * These are currently only needed for moving aggregates, but let's do + * the right thing anyway... */ if (state2->maxScale > state1->maxScale) { @@ -3518,11 +3518,11 @@ numeric_avg_combine(PG_FUNCTION_ARGS) Datum numeric_avg_serialize(PG_FUNCTION_ARGS) { - NumericAggState *state; - StringInfoData buf; - Datum temp; - bytea *sumX; - bytea *result; + NumericAggState *state; + StringInfoData buf; + Datum temp; + bytea *sumX; + bytea *result; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) @@ -3549,7 +3549,7 @@ numeric_avg_serialize(PG_FUNCTION_ARGS) pq_sendbytes(&buf, VARDATA(sumX), VARSIZE(sumX) - VARHDRSZ); /* maxScale */ - pq_sendint(&buf, state->maxScale, 4); + pq_sendint(&buf, state->maxScale, 4); /* maxScaleCount */ pq_sendint64(&buf, state->maxScaleCount); @@ -3564,7 +3564,7 @@ numeric_avg_serialize(PG_FUNCTION_ARGS) /* * numeric_avg_deserialize - * Deserialize bytea into NumericAggState for numeric aggregates that + * Deserialize bytea into NumericAggState for numeric aggregates that * don't require sumX2. Deserializes bytea into NumericAggState using the * standard pq API. 
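The deserialize side mirrors those send calls field for field, wrapping the bytea in a StringInfo so the pq_getmsg* readers can walk it. A sketch matching the write order above (N, sumX, maxScale, maxScaleCount, NaNcount):

    bytea      *sstate = PG_GETARG_BYTEA_P(0);
    NumericAggState *result;
    StringInfoData buf;
    Datum       temp;

    /* copy the payload into a StringInfo for the pq_getmsg* routines */
    initStringInfo(&buf);
    appendBinaryStringInfo(&buf, VARDATA(sstate), VARSIZE(sstate) - VARHDRSZ);

    result = makeNumericAggState(fcinfo, false);    /* no sumX2 for avg */
    result->N = pq_getmsgint64(&buf);
    temp = DirectFunctionCall3(numeric_recv,
                               PointerGetDatum(&buf),
                               ObjectIdGetDatum(InvalidOid),
                               Int32GetDatum(-1));
    set_var_from_num(DatumGetNumeric(temp), &result->sumX);
    result->maxScale = pq_getmsgint(&buf, 4);
    result->maxScaleCount = pq_getmsgint64(&buf);
    result->NaNcount = pq_getmsgint64(&buf);
    pq_getmsgend(&buf);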
* @@ -3574,10 +3574,10 @@ numeric_avg_serialize(PG_FUNCTION_ARGS) Datum numeric_avg_deserialize(PG_FUNCTION_ARGS) { - bytea *sstate = PG_GETARG_BYTEA_P(0); - NumericAggState *result; - Datum temp; - StringInfoData buf; + bytea *sstate = PG_GETARG_BYTEA_P(0); + NumericAggState *result; + Datum temp; + StringInfoData buf; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -3628,12 +3628,12 @@ numeric_avg_deserialize(PG_FUNCTION_ARGS) Datum numeric_serialize(PG_FUNCTION_ARGS) { - NumericAggState *state; - StringInfoData buf; - Datum temp; - bytea *sumX; - bytea *sumX2; - bytea *result; + NumericAggState *state; + StringInfoData buf; + Datum temp; + bytea *sumX; + bytea *sumX2; + bytea *result; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) @@ -3667,7 +3667,7 @@ numeric_serialize(PG_FUNCTION_ARGS) pq_sendbytes(&buf, VARDATA(sumX2), VARSIZE(sumX2) - VARHDRSZ); /* maxScale */ - pq_sendint(&buf, state->maxScale, 4); + pq_sendint(&buf, state->maxScale, 4); /* maxScaleCount */ pq_sendint64(&buf, state->maxScaleCount); @@ -3692,10 +3692,10 @@ numeric_serialize(PG_FUNCTION_ARGS) Datum numeric_deserialize(PG_FUNCTION_ARGS) { - bytea *sstate = PG_GETARG_BYTEA_P(0); - NumericAggState *result; - Datum temp; - StringInfoData buf; + bytea *sstate = PG_GETARG_BYTEA_P(0); + NumericAggState *result; + Datum temp; + StringInfoData buf; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -3932,8 +3932,8 @@ numeric_poly_combine(PG_FUNCTION_ARGS) { PolyNumAggState *state1; PolyNumAggState *state2; - MemoryContext agg_context; - MemoryContext old_context; + MemoryContext agg_context; + MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -4001,11 +4001,11 @@ numeric_poly_combine(PG_FUNCTION_ARGS) Datum numeric_poly_serialize(PG_FUNCTION_ARGS) { - PolyNumAggState *state; - StringInfoData buf; - bytea *sumX; - bytea *sumX2; - bytea *result; + PolyNumAggState *state; + StringInfoData buf; + bytea *sumX; + bytea *sumX2; + bytea *result; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) @@ -4040,11 +4040,11 @@ numeric_poly_serialize(PG_FUNCTION_ARGS) free_var(&num); #else temp = DirectFunctionCall1(numeric_send, - NumericGetDatum(make_result(&state->sumX))); + NumericGetDatum(make_result(&state->sumX))); sumX = DatumGetByteaP(temp); temp = DirectFunctionCall1(numeric_send, - NumericGetDatum(make_result(&state->sumX2))); + NumericGetDatum(make_result(&state->sumX2))); sumX2 = DatumGetByteaP(temp); #endif } @@ -4076,11 +4076,11 @@ numeric_poly_serialize(PG_FUNCTION_ARGS) Datum numeric_poly_deserialize(PG_FUNCTION_ARGS) { - bytea *sstate = PG_GETARG_BYTEA_P(0); - PolyNumAggState *result; - Datum sumX; - Datum sumX2; - StringInfoData buf; + bytea *sstate = PG_GETARG_BYTEA_P(0); + PolyNumAggState *result; + Datum sumX; + Datum sumX2; + StringInfoData buf; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -4105,13 +4105,13 @@ numeric_poly_deserialize(PG_FUNCTION_ARGS) /* sumX2 */ sumX2 = DirectFunctionCall3(numeric_recv, - PointerGetDatum(&buf), - InvalidOid, - -1); + PointerGetDatum(&buf), + InvalidOid, + -1); #ifdef HAVE_INT128 { - NumericVar num; + NumericVar num; init_var(&num); set_var_from_num(DatumGetNumeric(sumX), &num); @@ -4170,10 +4170,10 @@ 
int8_avg_accum(PG_FUNCTION_ARGS) Datum int8_avg_combine(PG_FUNCTION_ARGS) { - PolyNumAggState *state1; - PolyNumAggState *state2; - MemoryContext agg_context; - MemoryContext old_context; + PolyNumAggState *state1; + PolyNumAggState *state2; + MemoryContext agg_context; + MemoryContext old_context; if (!AggCheckCallContext(fcinfo, &agg_context)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -4233,10 +4233,10 @@ int8_avg_combine(PG_FUNCTION_ARGS) Datum int8_avg_serialize(PG_FUNCTION_ARGS) { - PolyNumAggState *state; - StringInfoData buf; - bytea *sumX; - bytea *result; + PolyNumAggState *state; + StringInfoData buf; + bytea *sumX; + bytea *result; /* Ensure we disallow calling when not in aggregate context */ if (!AggCheckCallContext(fcinfo, NULL)) @@ -4265,7 +4265,7 @@ int8_avg_serialize(PG_FUNCTION_ARGS) sumX = DatumGetByteaP(temp); #else temp = DirectFunctionCall1(numeric_send, - NumericGetDatum(make_result(&state->sumX))); + NumericGetDatum(make_result(&state->sumX))); sumX = DatumGetByteaP(temp); #endif } @@ -4293,10 +4293,10 @@ int8_avg_serialize(PG_FUNCTION_ARGS) Datum int8_avg_deserialize(PG_FUNCTION_ARGS) { - bytea *sstate = PG_GETARG_BYTEA_P(0); - PolyNumAggState *result; - StringInfoData buf; - Datum temp; + bytea *sstate = PG_GETARG_BYTEA_P(0); + PolyNumAggState *result; + StringInfoData buf; + Datum temp; if (!AggCheckCallContext(fcinfo, NULL)) elog(ERROR, "aggregate function called in non-aggregate context"); @@ -4321,7 +4321,7 @@ int8_avg_deserialize(PG_FUNCTION_ARGS) #ifdef HAVE_INT128 { - NumericVar num; + NumericVar num; init_var(&num); set_var_from_num(DatumGetNumeric(temp), &num); diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index 17c5cb0fb64..1bba5fa8c81 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -27,7 +27,7 @@ #include "utils/inet.h" #include "utils/timestamp.h" -#define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32 *)&(var)))) +#define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32 *)&(var)))) /* bogus ... 
these externs should be in a header file */ extern Datum pg_stat_get_numscans(PG_FUNCTION_ARGS); @@ -540,7 +540,7 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) int num_backends = pgstat_fetch_stat_numbackends(); int curr_backend; char *cmd = text_to_cstring(PG_GETARG_TEXT_PP(0)); - ProgressCommandType cmdtype; + ProgressCommandType cmdtype; TupleDesc tupdesc; Tuplestorestate *tupstore; ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; @@ -582,8 +582,8 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) /* 1-based index */ for (curr_backend = 1; curr_backend <= num_backends; curr_backend++) { - LocalPgBackendStatus *local_beentry; - PgBackendStatus *beentry; + LocalPgBackendStatus *local_beentry; + PgBackendStatus *beentry; Datum values[PG_STAT_GET_PROGRESS_COLS]; bool nulls[PG_STAT_GET_PROGRESS_COLS]; int i; @@ -613,14 +613,14 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS) if (has_privs_of_role(GetUserId(), beentry->st_userid)) { values[2] = ObjectIdGetDatum(beentry->st_progress_command_target); - for(i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++) - values[i+3] = Int64GetDatum(beentry->st_progress_param[i]); + for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++) + values[i + 3] = Int64GetDatum(beentry->st_progress_param[i]); } else { nulls[2] = true; for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++) - nulls[i+3] = true; + nulls[i + 3] = true; } tuplestore_putvalues(tupstore, tupdesc, values, nulls); @@ -787,7 +787,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS) proc = BackendPidGetProc(beentry->st_procpid); if (proc != NULL) { - uint32 raw_wait_event; + uint32 raw_wait_event; raw_wait_event = UINT32_ACCESS_ONCE(proc->wait_event_info); wait_event_type = pgstat_get_wait_event_type(raw_wait_event); diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c index 7d35d581126..b89e90f908e 100644 --- a/src/backend/utils/adt/rangetypes_spgist.c +++ b/src/backend/utils/adt/rangetypes_spgist.c @@ -760,13 +760,14 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS) /* Save previous prefix if needed */ if (needPrevious) { - Datum previousCentroid; + Datum previousCentroid; - /* We know, that in->prefixDatum in this place is varlena, + /* + * We know, that in->prefixDatum in this place is varlena, * because it's range */ previousCentroid = datumCopy(in->prefixDatum, false, -1); - out->traversalValues[out->nNodes] = (void *)previousCentroid; + out->traversalValues[out->nNodes] = (void *) previousCentroid; } out->nodeNumbers[out->nNodes] = i - 1; out->nNodes++; diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c index ebc11c9e4eb..b0963291433 100644 --- a/src/backend/utils/adt/tsginidx.c +++ b/src/backend/utils/adt/tsginidx.c @@ -184,8 +184,8 @@ checkcondition_gin_internal(GinChkVal *gcv, QueryOperand *val, ExecPhraseData *d int j; /* - * if any val requiring a weight is used or caller - * needs position information then set recheck flag + * if any val requiring a weight is used or caller needs position + * information then set recheck flag */ if (val->weight != 0 || data != NULL) *gcv->need_recheck = true; @@ -236,9 +236,10 @@ TS_execute_ternary(GinChkVal *gcv, QueryItem *curitem) return !result; case OP_PHRASE: + /* - * GIN doesn't contain any information about positions, - * treat OP_PHRASE as OP_AND with recheck requirement + * GIN doesn't contain any information about positions, treat + * OP_PHRASE as OP_AND with recheck requirement */ *gcv->need_recheck = true; /* FALL THRU */ diff --git a/src/backend/utils/adt/tsquery.c 
b/src/backend/utils/adt/tsquery.c index eea6e0eae17..21a18bfbc44 100644 --- a/src/backend/utils/adt/tsquery.c +++ b/src/backend/utils/adt/tsquery.c @@ -24,12 +24,12 @@ #include "utils/pg_crc.h" /* FTS operator priorities, see ts_type.h */ -const int tsearch_op_priority[OP_COUNT] = +const int tsearch_op_priority[OP_COUNT] = { - 3, /* OP_NOT */ - 2, /* OP_AND */ - 1, /* OP_OR */ - 4 /* OP_PHRASE */ + 3, /* OP_NOT */ + 2, /* OP_AND */ + 1, /* OP_OR */ + 4 /* OP_PHRASE */ }; struct TSQueryParserStateData @@ -128,15 +128,15 @@ parse_phrase_operator(char *buf, int16 *distance) PHRASE_CLOSE, PHRASE_ERR, PHRASE_FINISH - } state = PHRASE_OPEN; + } state = PHRASE_OPEN; - char *ptr = buf; - char *endptr; - long l = 1; + char *ptr = buf; + char *endptr; + long l = 1; while (*ptr) { - switch(state) + switch (state) {
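Two notes tie the tsquery hunks together: parse_phrase_operator() accepts '<->' as shorthand for distance 1 and '<N>' for an explicit distance, and the tsearch_op_priority table drives parenthesization when a query is printed, wrapping a child whose operator binds less tightly than its parent's. A sketch of that test, assuming the OP_PRIORITY indexing macro declared alongside the table in ts_type.h:

    /* e.g. in (a | b) <-> c the OR branch keeps its parentheses,
     * since OP_OR has priority 1 and OP_PHRASE priority 4 */
    static bool
    needs_parens(int8 parent_op, int8 child_op)
    {
        return OP_PRIORITY(child_op) < OP_PRIORITY(parent_op);
    }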