| author | Tom Lane | 2022-05-12 19:17:30 +0000 |
|---|---|---|
| committer | Tom Lane | 2022-05-12 19:17:30 +0000 |
| commit | 23e7b38bfe396f919fdb66057174d29e17086418 (patch) | |
| tree | 335c3962ef8afe0f6193d0413dbc51642276b147 /src/backend | |
| parent | 93909599cdba64c8759d646983c0a4ef93de1e50 (diff) | |
Pre-beta mechanical code beautification.
Run pgindent, pgperltidy, and reformat-dat-files.
I manually fixed a couple of comments that pgindent uglified.
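As a hedged sketch of what this pass typically looks like (not taken from this commit; the authoritative procedure and prerequisites such as pg_bsd_indent and perltidy are documented in src/tools/pgindent/README), the three tools named above are usually run from the top of the source tree roughly like so:

    src/tools/pgindent/pgindent                      # reindent C sources (uses typedefs.list)
    src/tools/pgindent/pgperltidy                    # run perltidy over the tree's Perl code
    make -C src/include/catalog reformat-dat-files   # rewrite catalog .dat files canonically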
Diffstat (limited to 'src/backend')
128 files changed, 1319 insertions, 1257 deletions
diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c index 7052ac99780..576e585a89f 100644 --- a/src/backend/access/common/toast_internals.c +++ b/src/backend/access/common/toast_internals.c @@ -663,9 +663,9 @@ init_toast_snapshot(Snapshot toast_snapshot) /* * Catalog snapshots can be returned by GetOldestSnapshot() even if not * registered or active. That easily hides bugs around not having a - * snapshot set up - most of the time there is a valid catalog - * snapshot. So additionally insist that the current snapshot is - * registered or active. + * snapshot set up - most of the time there is a valid catalog snapshot. + * So additionally insist that the current snapshot is registered or + * active. */ Assert(HaveRegisteredOrActiveSnapshot()); diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 98d31de0031..9f43bbe25f5 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -68,9 +68,9 @@ typedef struct /* * Tuple visibility is only computed once for each tuple, for correctness - * and efficiency reasons; see comment in heap_page_prune() for - * details. This is of type int8[,] instead of HTSV_Result[], so we can use - * -1 to indicate no visibility has been computed, e.g. for LP_DEAD items. + * and efficiency reasons; see comment in heap_page_prune() for details. + * This is of type int8[], instead of HTSV_Result[], so we can use -1 to + * indicate no visibility has been computed, e.g. for LP_DEAD items. * * Same indexing as ->marked. */ @@ -203,8 +203,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer) */ if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree) { - int ndeleted, - nnewlpdead; + int ndeleted, + nnewlpdead; ndeleted = heap_page_prune(relation, buffer, vistest, limited_xmin, limited_ts, &nnewlpdead, NULL); @@ -267,7 +267,7 @@ heap_page_prune(Relation relation, Buffer buffer, GlobalVisState *vistest, TransactionId old_snap_xmin, TimestampTz old_snap_ts, - int *nnewlpdead, + int *nnewlpdead, OffsetNumber *off_loc) { int ndeleted = 0; diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 9482f99e68b..b802ed247e7 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -326,7 +326,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params, PGRUsage ru0; TimestampTz starttime = 0; PgStat_Counter startreadtime = 0, - startwritetime = 0; + startwritetime = 0; WalUsage startwalusage = pgWalUsage; int64 StartPageHit = VacuumPageHit, StartPageMiss = VacuumPageMiss, @@ -2232,12 +2232,12 @@ lazy_vacuum(LVRelState *vacrel) * dead_items space is not CPU cache resident. * * We don't take any special steps to remember the LP_DEAD items (such - * as counting them in our final update to the stats system) when - * the optimization is applied. Though the accounting used in - * analyze.c's acquire_sample_rows() will recognize the same LP_DEAD - * items as dead rows in its own stats report, that's okay. - * The discrepancy should be negligible. If this optimization is ever - * expanded to cover more cases then this may need to be reconsidered. + * as counting them in our final update to the stats system) when the + * optimization is applied. Though the accounting used in analyze.c's + * acquire_sample_rows() will recognize the same LP_DEAD items as dead + * rows in its own stats report, that's okay. The discrepancy should + * be negligible. 
If this optimization is ever expanded to cover more + * cases then this may need to be reconsidered. */ threshold = (double) vacrel->rel_pages * BYPASS_THRESHOLD_PAGES; bypass = (vacrel->lpdead_item_pages < threshold && diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c index e739c4a3bd9..90b6ac2884d 100644 --- a/src/backend/access/rmgrdesc/xactdesc.c +++ b/src/backend/access/rmgrdesc/xactdesc.c @@ -411,8 +411,8 @@ xact_desc_prepare(StringInfo buf, uint8 info, xl_xact_prepare *xlrec, RepOriginI parsed.tsId, xlrec->initfileinval); /* - * Check if the replication origin has been set in this record in the - * same way as PrepareRedoAdd(). + * Check if the replication origin has been set in this record in the same + * way as PrepareRedoAdd(). */ if (origin_id != InvalidRepOriginId) appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s", diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c index c0dfea40c70..fefc563323d 100644 --- a/src/backend/access/rmgrdesc/xlogdesc.c +++ b/src/backend/access/rmgrdesc/xlogdesc.c @@ -210,7 +210,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty, bool detailed_format, StringInfo buf, uint32 *fpi_len) { - int block_id; + int block_id; Assert(record != NULL); diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c index e1d6ebbd3db..8ed69244e39 100644 --- a/src/backend/access/transam/rmgr.c +++ b/src/backend/access/transam/rmgr.c @@ -38,7 +38,7 @@ #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask,decode) \ { name, redo, desc, identify, startup, cleanup, mask, decode }, -RmgrData RmgrTable[RM_MAX_ID + 1] = { +RmgrData RmgrTable[RM_MAX_ID + 1] = { #include "access/rmgrlist.h" }; @@ -125,8 +125,8 @@ RegisterCustomRmgr(RmgrId rmid, RmgrData *rmgr) if (!pg_strcasecmp(RmgrTable[existing_rmid].rm_name, rmgr->rm_name)) ereport(ERROR, - (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid), - errdetail("Existing resource manager with ID %d has the same name.", existing_rmid))); + (errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid), + errdetail("Existing resource manager with ID %d has the same name.", existing_rmid))); } /* register it */ diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index dc0266693e3..75551f60cbc 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -1119,7 +1119,7 @@ StartPrepare(GlobalTransaction gxact) if (hdr.nabortstats > 0) { save_state_data(abortstats, - hdr.nabortstats * sizeof(xl_xact_stats_item)); + hdr.nabortstats * sizeof(xl_xact_stats_item)); pfree(abortstats); } if (hdr.ninvalmsgs > 0) @@ -1529,9 +1529,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit) bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode)); abortrels = (RelFileNode *) bufptr; bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode)); - commitstats = (xl_xact_stats_item*) bufptr; + commitstats = (xl_xact_stats_item *) bufptr; bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item)); - abortstats = (xl_xact_stats_item*) bufptr; + abortstats = (xl_xact_stats_item *) bufptr; bufptr += MAXALIGN(hdr->nabortstats * sizeof(xl_xact_stats_item)); invalmsgs = (SharedInvalidationMessage *) bufptr; bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage)); diff --git a/src/backend/access/transam/xlog.c 
b/src/backend/access/transam/xlog.c index 36852f23277..71136b11a2a 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -435,10 +435,10 @@ typedef struct XLogCtlInsert bool fullPageWrites; /* - * runningBackups is a counter indicating the number of backups currently in - * progress. forcePageWrites is set to true when runningBackups is non-zero. - * lastBackupStart is the latest checkpoint redo location used as a starting - * point for an online backup. + * runningBackups is a counter indicating the number of backups currently + * in progress. forcePageWrites is set to true when runningBackups is + * non-zero. lastBackupStart is the latest checkpoint redo location used + * as a starting point for an online backup. */ int runningBackups; XLogRecPtr lastBackupStart; @@ -5307,14 +5307,14 @@ StartupXLOG(void) * When recovering from a backup (we are in recovery, and archive recovery * was requested), complain if we did not roll forward far enough to reach * the point where the database is consistent. For regular online - * backup-from-primary, that means reaching the end-of-backup WAL record (at - * which point we reset backupStartPoint to be Invalid), for + * backup-from-primary, that means reaching the end-of-backup WAL record + * (at which point we reset backupStartPoint to be Invalid), for * backup-from-replica (which can't inject records into the WAL stream), * that point is when we reach the minRecoveryPoint in pg_control (which - * we purposfully copy last when backing up from a replica). For pg_rewind - * (which creates a backup_label with a method of "pg_rewind") or - * snapshot-style backups (which don't), backupEndRequired will be set to - * false. + * we purposefully copy last when backing up from a replica). For + * pg_rewind (which creates a backup_label with a method of "pg_rewind") + * or snapshot-style backups (which don't), backupEndRequired will be set + * to false. * * Note: it is indeed okay to look at the local variable * LocalMinRecoveryPoint here, even though ControlFile->minRecoveryPoint @@ -5328,8 +5328,8 @@ StartupXLOG(void) /* * Ran off end of WAL before reaching end-of-backup WAL record, or * minRecoveryPoint. That's a bad sign, indicating that you tried to - * recover from an online backup but never called pg_backup_stop(), - * or you didn't archive all the WAL needed. + * recover from an online backup but never called pg_backup_stop(), or + * you didn't archive all the WAL needed. */ if (ArchiveRecoveryRequested || ControlFile->backupEndRequired) { @@ -8481,8 +8481,8 @@ do_pg_backup_stop(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p) WALInsertLockAcquireExclusive(); /* - * It is expected that each do_pg_backup_start() call is matched by exactly - * one do_pg_backup_stop() call. + * It is expected that each do_pg_backup_start() call is matched by + * exactly one do_pg_backup_stop() call. */ Assert(XLogCtl->Insert.runningBackups > 0); XLogCtl->Insert.runningBackups--; diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c index a2657a20058..4101a30e374 100644 --- a/src/backend/access/transam/xlogarchive.c +++ b/src/backend/access/transam/xlogarchive.c @@ -497,15 +497,15 @@ XLogArchiveNotify(const char *xlog) } /* - * Timeline history files are given the highest archival priority to - * lower the chance that a promoted standby will choose a timeline that - * is already in use. 
However, the archiver ordinarily tries to gather + * Timeline history files are given the highest archival priority to lower + * the chance that a promoted standby will choose a timeline that is + * already in use. However, the archiver ordinarily tries to gather * multiple files to archive from each scan of the archive_status - * directory, which means that newly created timeline history files - * could be left unarchived for a while. To ensure that the archiver - * picks up timeline history files as soon as possible, we force the - * archiver to scan the archive_status directory the next time it looks - * for a file to archive. + * directory, which means that newly created timeline history files could + * be left unarchived for a while. To ensure that the archiver picks up + * timeline history files as soon as possible, we force the archiver to + * scan the archive_status directory the next time it looks for a file to + * archive. */ if (IsTLHistoryFileName(xlog)) PgArchForceDirScan(); diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index b61ae6c0b4a..02bd919ff64 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -74,8 +74,8 @@ pg_backup_start(PG_FUNCTION_ARGS) errmsg("a backup is already in progress in this session"))); /* - * Label file and tablespace map file need to be long-lived, since - * they are read in pg_backup_stop. + * Label file and tablespace map file need to be long-lived, since they + * are read in pg_backup_stop. */ oldcontext = MemoryContextSwitchTo(TopMemoryContext); label_file = makeStringInfo(); @@ -127,8 +127,8 @@ pg_backup_stop(PG_FUNCTION_ARGS) errhint("Did you call pg_backup_start()?"))); /* - * Stop the backup. Return a copy of the backup label and tablespace map so - * they can be written to disk by the caller. + * Stop the backup. Return a copy of the backup label and tablespace map + * so they can be written to disk by the caller. */ stoppoint = do_pg_backup_stop(label_file->data, waitforarchive, NULL); diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c index 39ef865ed92..6eba6264202 100644 --- a/src/backend/access/transam/xlogrecovery.c +++ b/src/backend/access/transam/xlogrecovery.c @@ -1205,9 +1205,9 @@ read_backup_label(XLogRecPtr *checkPointLoc, TimeLineID *backupLabelTLI, * method was used) or if this label came from somewhere else (the only * other option today being from pg_rewind). If this was a streamed * backup then we know that we need to play through until we get to the - * end of the WAL which was generated during the backup (at which point - * we will have reached consistency and backupEndRequired will be reset - * to be false). + * end of the WAL which was generated during the backup (at which point we + * will have reached consistency and backupEndRequired will be reset to be + * false). */ if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1) { @@ -2055,10 +2055,9 @@ CheckRecoveryConsistency(void) /* * Have we passed our safe starting point? Note that minRecoveryPoint is - * known to be incorrectly set if recovering from a backup, until - * the XLOG_BACKUP_END arrives to advise us of the correct - * minRecoveryPoint. All we know prior to that is that we're not - * consistent yet. + * known to be incorrectly set if recovering from a backup, until the + * XLOG_BACKUP_END arrives to advise us of the correct minRecoveryPoint. + * All we know prior to that is that we're not consistent yet. 
*/ if (!reachedConsistency && !backupEndRequired && minRecoveryPoint <= lastReplayedEndRecPtr) @@ -3802,7 +3801,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, HandleStartupProcInterrupts(); } - return XLREAD_FAIL; /* not reached */ + return XLREAD_FAIL; /* not reached */ } diff --git a/src/backend/access/transam/xlogstats.c b/src/backend/access/transam/xlogstats.c index 6524a1ad0b9..514181792dc 100644 --- a/src/backend/access/transam/xlogstats.c +++ b/src/backend/access/transam/xlogstats.c @@ -22,7 +22,7 @@ void XLogRecGetLen(XLogReaderState *record, uint32 *rec_len, uint32 *fpi_len) { - int block_id; + int block_id; /* * Calculate the amount of FPI data in the record. @@ -53,10 +53,10 @@ XLogRecGetLen(XLogReaderState *record, uint32 *rec_len, void XLogRecStoreStats(XLogStats *stats, XLogReaderState *record) { - RmgrId rmid; - uint8 recid; - uint32 rec_len; - uint32 fpi_len; + RmgrId rmid; + uint8 recid; + uint32 rec_len; + uint32 fpi_len; Assert(stats != NULL && record != NULL); diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index 29419c10a88..48516694f08 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -80,10 +80,9 @@ typedef struct xl_invalid_page static HTAB *invalid_page_tab = NULL; -static int -read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr, - int reqLen, XLogRecPtr targetRecPtr, - char *cur_page, bool wait_for_wal); +static int read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr, + int reqLen, XLogRecPtr targetRecPtr, + char *cur_page, bool wait_for_wal); /* Report a reference to an invalid page */ static void @@ -940,8 +939,8 @@ read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr, * archive in the timeline will get renamed to .partial by * StartupXLOG(). * - * If that happens after our caller determined the TLI but before - * we actually read the xlog page, we might still try to read from the + * If that happens after our caller determined the TLI but before we + * actually read the xlog page, we might still try to read from the * old (now renamed) segment and fail. There's not much we can do * about this, but it can only happen when we're a leaf of a cascading * standby whose primary gets promoted while we're decoding, so a @@ -965,7 +964,7 @@ read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr, * end of WAL has been reached. 
*/ private_data = (ReadLocalXLogPageNoWaitPrivate *) - state->private_data; + state->private_data; private_data->end_of_wal = true; break; } diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm index ece0a934f05..e91a8e10a8d 100644 --- a/src/backend/catalog/Catalog.pm +++ b/src/backend/catalog/Catalog.pm @@ -41,12 +41,12 @@ sub ParseHeader my $is_varlen = 0; my $is_client_code = 0; - $catalog{columns} = []; - $catalog{toasting} = []; - $catalog{indexing} = []; - $catalog{other_oids} = []; + $catalog{columns} = []; + $catalog{toasting} = []; + $catalog{indexing} = []; + $catalog{other_oids} = []; $catalog{foreign_keys} = []; - $catalog{client_code} = []; + $catalog{client_code} = []; open(my $ifh, '<', $input_file) || die "$input_file: $!"; @@ -96,7 +96,9 @@ sub ParseHeader push @{ $catalog{toasting} }, { parent_table => $1, toast_oid => $2, toast_index_oid => $3 }; } - elsif (/^DECLARE_TOAST_WITH_MACRO\(\s*(\w+),\s*(\d+),\s*(\d+),\s*(\w+),\s*(\w+)\)/) + elsif ( + /^DECLARE_TOAST_WITH_MACRO\(\s*(\w+),\s*(\d+),\s*(\d+),\s*(\w+),\s*(\w+)\)/ + ) { push @{ $catalog{toasting} }, { @@ -108,16 +110,17 @@ sub ParseHeader }; } elsif ( - /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(\w+),\s*(.+)\)/) + /^DECLARE_(UNIQUE_)?INDEX(_PKEY)?\(\s*(\w+),\s*(\d+),\s*(\w+),\s*(.+)\)/ + ) { push @{ $catalog{indexing} }, { is_unique => $1 ? 1 : 0, is_pkey => $2 ? 1 : 0, - index_name => $3, - index_oid => $4, + index_name => $3, + index_oid => $4, index_oid_macro => $5, - index_decl => $6 + index_decl => $6 }; } elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/) diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl index f4ec6d6d40c..17b2c5e3f3d 100644 --- a/src/backend/catalog/genbki.pl +++ b/src/backend/catalog/genbki.pl @@ -814,7 +814,7 @@ Catalog::RenameTempFile($schemafile, $tmpext); Catalog::RenameTempFile($fk_info_file, $tmpext); Catalog::RenameTempFile($constraints_file, $tmpext); -exit ($num_errors != 0 ? 1 : 0); +exit($num_errors != 0 ? 1 : 0); #################### Subroutines ######################## @@ -916,11 +916,11 @@ sub morph_row_for_pgattr # Copy the type data from pg_type, and add some type-dependent items my $type = $types{$atttype}; - $row->{atttypid} = $type->{oid}; - $row->{attlen} = $type->{typlen}; - $row->{attbyval} = $type->{typbyval}; - $row->{attalign} = $type->{typalign}; - $row->{attstorage} = $type->{typstorage}; + $row->{atttypid} = $type->{oid}; + $row->{attlen} = $type->{typlen}; + $row->{attbyval} = $type->{typbyval}; + $row->{attalign} = $type->{typalign}; + $row->{attstorage} = $type->{typstorage}; # set attndims if it's an array type $row->{attndims} = $type->{typcategory} eq 'A' ? '1' : '0'; diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 9b512ccd3c0..800f85ed7db 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -1198,7 +1198,7 @@ heap_create_with_catalog(const char *relname, if (!OidIsValid(binary_upgrade_next_toast_pg_class_relfilenode)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("toast relfilenode value not set when in binary upgrade mode"))); + errmsg("toast relfilenode value not set when in binary upgrade mode"))); relfilenode = binary_upgrade_next_toast_pg_class_relfilenode; binary_upgrade_next_toast_pg_class_relfilenode = InvalidOid; @@ -1265,8 +1265,8 @@ heap_create_with_catalog(const char *relname, * remove the disk file again.) * * NB: Note that passing create_storage = true is correct even for binary - * upgrade. 
The storage we create here will be replaced later, but we need - * to have something on disk in the meanwhile. + * upgrade. The storage we create here will be replaced later, but we + * need to have something on disk in the meanwhile. */ new_rel_desc = heap_create(relname, relnamespace, @@ -3219,9 +3219,8 @@ restart: /* * If this constraint has a parent constraint which we have not seen * yet, keep track of it for the second loop, below. Tracking parent - * constraints allows us to climb up to the top-level constraint - * and look for all possible relations referencing the partitioned - * table. + * constraints allows us to climb up to the top-level constraint and + * look for all possible relations referencing the partitioned table. */ if (OidIsValid(con->conparentid) && !list_member_oid(parent_cons, con->conparentid)) diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 7539742c782..bdd3c348417 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -928,9 +928,9 @@ index_create(Relation heapRelation, binary_upgrade_next_index_pg_class_relfilenode = InvalidOid; /* - * Note that we want create_storage = true for binary upgrade. - * The storage we create here will be replaced later, but we need - * to have something on disk in the meanwhile. + * Note that we want create_storage = true for binary upgrade. The + * storage we create here will be replaced later, but we need to + * have something on disk in the meanwhile. */ Assert(create_storage); } diff --git a/src/backend/catalog/objectaccess.c b/src/backend/catalog/objectaccess.c index 38922294e28..1c51df02d21 100644 --- a/src/backend/catalog/objectaccess.c +++ b/src/backend/catalog/objectaccess.c @@ -156,7 +156,7 @@ RunFunctionExecuteHook(Oid objectId) */ void RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId, - bool is_internal) + bool is_internal) { ObjectAccessPostCreate pc_arg; @@ -167,8 +167,8 @@ RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId, pc_arg.is_internal = is_internal; (*object_access_hook_str) (OAT_POST_CREATE, - classId, objectName, subId, - (void *) &pc_arg); + classId, objectName, subId, + (void *) &pc_arg); } /* @@ -178,7 +178,7 @@ RunObjectPostCreateHookStr(Oid classId, const char *objectName, int subId, */ void RunObjectDropHookStr(Oid classId, const char *objectName, int subId, - int dropflags) + int dropflags) { ObjectAccessDrop drop_arg; @@ -189,8 +189,8 @@ RunObjectDropHookStr(Oid classId, const char *objectName, int subId, drop_arg.dropflags = dropflags; (*object_access_hook_str) (OAT_DROP, - classId, objectName, subId, - (void *) &drop_arg); + classId, objectName, subId, + (void *) &drop_arg); } /* @@ -205,8 +205,8 @@ RunObjectTruncateHookStr(const char *objectName) Assert(object_access_hook_str != NULL); (*object_access_hook_str) (OAT_TRUNCATE, - RelationRelationId, objectName, 0, - NULL); + RelationRelationId, objectName, 0, + NULL); } /* @@ -216,7 +216,7 @@ RunObjectTruncateHookStr(const char *objectName) */ void RunObjectPostAlterHookStr(Oid classId, const char *objectName, int subId, - Oid auxiliaryId, bool is_internal) + Oid auxiliaryId, bool is_internal) { ObjectAccessPostAlter pa_arg; @@ -228,8 +228,8 @@ RunObjectPostAlterHookStr(Oid classId, const char *objectName, int subId, pa_arg.is_internal = is_internal; (*object_access_hook_str) (OAT_POST_ALTER, - classId, objectName, subId, - (void *) &pa_arg); + classId, objectName, subId, + (void *) &pa_arg); } /* @@ -250,8 +250,8 @@ RunNamespaceSearchHookStr(const 
char *objectName, bool ereport_on_violation) ns_arg.result = true; (*object_access_hook_str) (OAT_NAMESPACE_SEARCH, - NamespaceRelationId, objectName, 0, - (void *) &ns_arg); + NamespaceRelationId, objectName, 0, + (void *) &ns_arg); return ns_arg.result; } @@ -268,6 +268,6 @@ RunFunctionExecuteHookStr(const char *objectName) Assert(object_access_hook_str != NULL); (*object_access_hook_str) (OAT_FUNCTION_EXECUTE, - ProcedureRelationId, objectName, 0, - NULL); + ProcedureRelationId, objectName, 0, + NULL); } diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index 472dbda2116..489f0b2818e 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -145,7 +145,7 @@ CreateConstraintEntry(const char *constraintName, for (i = 0; i < numFkDeleteSetCols; i++) fkdatums[i] = Int16GetDatum(fkDeleteSetCols[i]); confdelsetcolsArray = construct_array(fkdatums, numFkDeleteSetCols, - INT2OID, 2, true, TYPALIGN_SHORT); + INT2OID, 2, true, TYPALIGN_SHORT); } else confdelsetcolsArray = NULL; @@ -1291,7 +1291,7 @@ DeconstructFkConstraintRow(HeapTuple tuple, int *numfks, } else { - int num_delete_cols; + int num_delete_cols; arr = DatumGetArrayTypeP(adatum); /* ensure not toasted */ if (ARR_NDIM(arr) != 1 || @@ -1301,7 +1301,7 @@ DeconstructFkConstraintRow(HeapTuple tuple, int *numfks, num_delete_cols = ARR_DIMS(arr)[0]; memcpy(fk_del_set_cols, ARR_DATA_PTR(arr), num_delete_cols * sizeof(int16)); if ((Pointer) arr != DatumGetPointer(adatum)) - pfree(arr); /* free de-toasted copy, if any */ + pfree(arr); /* free de-toasted copy, if any */ *num_fk_del_set_cols = num_delete_cols; } diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c index 2631558ff11..e2c8bcb2797 100644 --- a/src/backend/catalog/pg_publication.c +++ b/src/backend/catalog/pg_publication.c @@ -378,9 +378,9 @@ publication_add_relation(Oid pubid, PublicationRelInfo *pri, check_publication_add_relation(targetrel); /* - * Translate column names to attnums and make sure the column list contains - * only allowed elements (no system or generated columns etc.). Also build - * an array of attnums, for storing in the catalog. + * Translate column names to attnums and make sure the column list + * contains only allowed elements (no system or generated columns etc.). + * Also build an array of attnums, for storing in the catalog. */ publication_translate_columns(pri->relation, pri->columns, &natts, &attarray); @@ -555,11 +555,11 @@ pub_collist_to_bitmapset(Bitmapset *columns, Datum pubcols, MemoryContext mcxt) ArrayType *arr; int nelems; int16 *elems; - MemoryContext oldcxt = NULL; + MemoryContext oldcxt = NULL; /* - * If an existing bitmap was provided, use it. Otherwise just use NULL - * and build a new bitmap. + * If an existing bitmap was provided, use it. Otherwise just use NULL and + * build a new bitmap. */ if (columns) result = columns; diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index e4d000d4fe8..cd31e68e95e 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -340,13 +340,13 @@ RelationTruncate(Relation rel, BlockNumber nblocks) * is in progress. * * The truncation operation might drop buffers that the checkpoint - * otherwise would have flushed. If it does, then it's essential that - * the files actually get truncated on disk before the checkpoint record - * is written. Otherwise, if reply begins from that checkpoint, the + * otherwise would have flushed. 
If it does, then it's essential that the + * files actually get truncated on disk before the checkpoint record is + * written. Otherwise, if reply begins from that checkpoint, the * to-be-truncated blocks might still exist on disk but have older - * contents than expected, which can cause replay to fail. It's OK for - * the blocks to not exist on disk at all, but not for them to have the - * wrong contents. + * contents than expected, which can cause replay to fail. It's OK for the + * blocks to not exist on disk at all, but not for them to have the wrong + * contents. */ Assert((MyProc->delayChkptFlags & DELAY_CHKPT_COMPLETE) == 0); MyProc->delayChkptFlags |= DELAY_CHKPT_COMPLETE; diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 305226692a4..2da6b75a155 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -429,7 +429,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params, */ if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { - List *idxs = RelationGetIndexList(onerel); + List *idxs = RelationGetIndexList(onerel); Irel = NULL; nindexes = 0; @@ -680,10 +680,10 @@ do_analyze_rel(Relation onerel, VacuumParams *params, } /* - * Now report ANALYZE to the cumulative stats system. For regular tables, we do - * it only if not doing inherited stats. For partitioned tables, we only - * do it for inherited stats. (We're never called for not-inherited stats - * on partitioned tables anyway.) + * Now report ANALYZE to the cumulative stats system. For regular tables, + * we do it only if not doing inherited stats. For partitioned tables, we + * only do it for inherited stats. (We're never called for not-inherited + * stats on partitioned tables anyway.) * * Reset the changes_since_analyze counter only if we analyzed all * columns; otherwise, there is still work for auto-analyze to do. diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c index 346f85f05ea..fcfc02d2aed 100644 --- a/src/backend/commands/collationcmds.c +++ b/src/backend/commands/collationcmds.c @@ -246,8 +246,9 @@ DefineCollation(ParseState *pstate, List *names, List *parameters, bool if_not_e /* * Nondeterministic collations are currently only supported with ICU - * because that's the only case where it can actually make a difference. - * So we can save writing the code for the other providers. + * because that's the only case where it can actually make a + * difference. So we can save writing the code for the other + * providers. 
*/ if (!collisdeterministic && collprovider != COLLPROVIDER_ICU) ereport(ERROR, diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 689713ea580..f448d39c7ed 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -345,7 +345,7 @@ defGetCopyHeaderChoice(DefElem *def) break; default: { - char *sval = defGetString(def); + char *sval = defGetString(def); /* * The set of strings accepted here should match up with the @@ -365,8 +365,8 @@ defGetCopyHeaderChoice(DefElem *def) break; } ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("%s requires a Boolean value or \"match\"", + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("%s requires a Boolean value or \"match\"", def->defname))); return COPY_HEADER_FALSE; /* keep compiler quiet */ } diff --git a/src/backend/commands/copyfromparse.c b/src/backend/commands/copyfromparse.c index 58017ec53b0..edb80e2cd52 100644 --- a/src/backend/commands/copyfromparse.c +++ b/src/backend/commands/copyfromparse.c @@ -800,7 +800,8 @@ NextCopyFromRawFields(CopyFromState cstate, char ***fields, int *nfields) errmsg("column name mismatch in header line field %d: got null value (\"%s\"), expected \"%s\"", fldnum, cstate->opts.null_print, NameStr(attr->attname)))); - if (namestrcmp(&attr->attname, colName) != 0) { + if (namestrcmp(&attr->attname, colName) != 0) + { ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), errmsg("column name mismatch in header line field %d: got \"%s\", expected \"%s\"", diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c index 643bbf286e5..fca29a9a105 100644 --- a/src/backend/commands/copyto.c +++ b/src/backend/commands/copyto.c @@ -439,8 +439,8 @@ BeginCopyTo(ParseState *pstate, * locks on the source table(s). */ rewritten = pg_analyze_and_rewrite_fixedparams(raw_query, - pstate->p_sourcetext, NULL, 0, - NULL); + pstate->p_sourcetext, NULL, 0, + NULL); /* check that we got back something we can work with */ if (rewritten == NIL) @@ -862,7 +862,7 @@ DoCopyTo(CopyToState cstate) if (cstate->opts.csv_mode) CopyAttributeOutCSV(cstate, colname, false, - list_length(cstate->attnumlist) == 1); + list_length(cstate->attnumlist) == 1); else CopyAttributeOutText(cstate, colname); } diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 6da58437c58..f2691684010 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -201,9 +201,9 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid, * * We typically do not read relation data into shared_buffers without * holding a relation lock. It's unclear what could go wrong if we - * skipped it in this case, because nobody can be modifying either - * the source or destination database at this point, and we have locks - * on both databases, too, but let's take the conservative route. + * skipped it in this case, because nobody can be modifying either the + * source or destination database at this point, and we have locks on + * both databases, too, but let's take the conservative route. */ dstrelid.relId = srcrelid.relId = relinfo->reloid; LockRelationId(&srcrelid, AccessShareLock); @@ -274,9 +274,9 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath) /* * We can't use a real relcache entry for a relation in some other - * database, but since we're only going to access the fields related - * to physical storage, a fake one is good enough. 
If we didn't do this - * and used the smgr layer directly, we would have to worry about + * database, but since we're only going to access the fields related to + * physical storage, a fake one is good enough. If we didn't do this and + * used the smgr layer directly, we would have to worry about * invalidations. */ rel = CreateFakeRelcacheEntry(rnode); @@ -333,10 +333,10 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid, char *srcpath, List *rnodelist, Snapshot snapshot) { - BlockNumber blkno = BufferGetBlockNumber(buf); - OffsetNumber offnum; - OffsetNumber maxoff; - HeapTupleData tuple; + BlockNumber blkno = BufferGetBlockNumber(buf); + OffsetNumber offnum; + OffsetNumber maxoff; + HeapTupleData tuple; maxoff = PageGetMaxOffsetNumber(page); @@ -368,10 +368,10 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid, CreateDBRelInfo *relinfo; /* - * ScanSourceDatabasePgClassTuple is in charge of constructing - * a CreateDBRelInfo object for this tuple, but can also decide - * that this tuple isn't something we need to copy. If we do need - * to copy the relation, add it to the list. + * ScanSourceDatabasePgClassTuple is in charge of constructing a + * CreateDBRelInfo object for this tuple, but can also decide that + * this tuple isn't something we need to copy. If we do need to + * copy the relation, add it to the list. */ relinfo = ScanSourceDatabasePgClassTuple(&tuple, tbid, dbid, srcpath); @@ -395,9 +395,9 @@ CreateDBRelInfo * ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid, char *srcpath) { - CreateDBRelInfo *relinfo; - Form_pg_class classForm; - Oid relfilenode = InvalidOid; + CreateDBRelInfo *relinfo; + Form_pg_class classForm; + Oid relfilenode = InvalidOid; classForm = (Form_pg_class) GETSTRUCT(tuple); @@ -406,11 +406,11 @@ ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid, * * Shared objects don't need to be copied, because they are shared. * Objects without storage can't be copied, because there's nothing to - * copy. Temporary relations don't need to be copied either, because - * they are inaccessible outside of the session that created them, - * which must be gone already, and couldn't connect to a different database - * if it still existed. autovacuum will eventually remove the pg_class - * entries as well. + * copy. Temporary relations don't need to be copied either, because they + * are inaccessible outside of the session that created them, which must + * be gone already, and couldn't connect to a different database if it + * still existed. autovacuum will eventually remove the pg_class entries + * as well. */ if (classForm->reltablespace == GLOBALTABLESPACE_OID || !RELKIND_HAS_STORAGE(classForm->relkind) || @@ -702,7 +702,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) DefElem *dcollate = NULL; DefElem *dctype = NULL; DefElem *diculocale = NULL; - DefElem *dlocprovider = NULL; + DefElem *dlocprovider = NULL; DefElem *distemplate = NULL; DefElem *dallowconnections = NULL; DefElem *dconnlimit = NULL; @@ -824,10 +824,10 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) /* * We don't normally permit new databases to be created with * system-assigned OIDs. pg_upgrade tries to preserve database - * OIDs, so we can't allow any database to be created with an - * OID that might be in use in a freshly-initialized cluster - * created by some future version. We assume all such OIDs will - * be from the system-managed OID range. 
+ * OIDs, so we can't allow any database to be created with an OID + * that might be in use in a freshly-initialized cluster created + * by some future version. We assume all such OIDs will be from + * the system-managed OID range. * * As an exception, however, we permit any OID to be assigned when * allow_system_table_mods=on (so that initdb can assign system @@ -1348,15 +1348,15 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) InvokeObjectPostCreateHook(DatabaseRelationId, dboid, 0); /* - * If we're going to be reading data for the to-be-created database - * into shared_buffers, take a lock on it. Nobody should know that this + * If we're going to be reading data for the to-be-created database into + * shared_buffers, take a lock on it. Nobody should know that this * database exists yet, but it's good to maintain the invariant that a * lock an AccessExclusiveLock on the database is sufficient to drop all * of its buffers without worrying about more being read later. * - * Note that we need to do this before entering the PG_ENSURE_ERROR_CLEANUP - * block below, because createdb_failure_callback expects this lock to - * be held already. + * Note that we need to do this before entering the + * PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback + * expects this lock to be held already. */ if (dbstrategy == CREATEDB_WAL_LOG) LockSharedObject(DatabaseRelationId, dboid, 0, AccessShareLock); diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index d2a24798220..c461061fe9e 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -3833,7 +3833,7 @@ ExplainTargetRel(Plan *plan, Index rti, ExplainState *es) if (rte->tablefunc) if (rte->tablefunc->functype == TFT_XMLTABLE) objectname = "xmltable"; - else /* Must be TFT_JSON_TABLE */ + else /* Must be TFT_JSON_TABLE */ objectname = "json_table"; else objectname = NULL; diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 1013790dbb3..767d9b96190 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -758,10 +758,10 @@ execute_sql_string(const char *sql) CommandCounterIncrement(); stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree, - sql, - NULL, - 0, - NULL); + sql, + NULL, + 0, + NULL); stmt_list = pg_plan_queries(stmt_list, sql, CURSOR_OPT_PARALLEL_OK, NULL); foreach(lc2, stmt_list) diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index 52534f18274..d1ee1064652 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -332,8 +332,8 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString, /* * Inform cumulative stats system about our activity: basically, we * truncated the matview and inserted some new data. (The concurrent - * code path above doesn't need to worry about this because the inserts - * and deletes it issues get counted by lower-level code.) + * code path above doesn't need to worry about this because the + * inserts and deletes it issues get counted by lower-level code.) 
*/ pgstat_count_truncate(matviewRel); if (!stmt->skipData) diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 6df0e6670fd..8e645741e4e 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -297,7 +297,7 @@ contain_invalid_rfcolumn_walker(Node *node, rf_context *context) */ bool pub_rf_contains_invalid_column(Oid pubid, Relation relation, List *ancestors, - bool pubviaroot) + bool pubviaroot) { HeapTuple rftuple; Oid relid = RelationGetRelid(relation); @@ -373,7 +373,7 @@ pub_rf_contains_invalid_column(Oid pubid, Relation relation, List *ancestors, */ bool pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestors, - bool pubviaroot) + bool pubviaroot) { HeapTuple tuple; Oid relid = RelationGetRelid(relation); @@ -384,8 +384,8 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor /* * For a partition, if pubviaroot is true, find the topmost ancestor that - * is published via this publication as we need to use its column list - * for the changes. + * is published via this publication as we need to use its column list for + * the changes. * * Note that even though the column list used is for an ancestor, the * REPLICA IDENTITY used will be for the actual child table. @@ -399,19 +399,19 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor } tuple = SearchSysCache2(PUBLICATIONRELMAP, - ObjectIdGetDatum(publish_as_relid), - ObjectIdGetDatum(pubid)); + ObjectIdGetDatum(publish_as_relid), + ObjectIdGetDatum(pubid)); if (!HeapTupleIsValid(tuple)) return false; datum = SysCacheGetAttr(PUBLICATIONRELMAP, tuple, - Anum_pg_publication_rel_prattrs, - &isnull); + Anum_pg_publication_rel_prattrs, + &isnull); if (!isnull) { - int x; + int x; Bitmapset *idattrs; Bitmapset *columns = NULL; @@ -429,8 +429,9 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor /* * Attnums in the bitmap returned by RelationGetIndexAttrBitmap are * offset (to handle system columns the usual way), while column list - * does not use offset, so we can't do bms_is_subset(). Instead, we have - * to loop over the idattrs and check all of them are in the list. + * does not use offset, so we can't do bms_is_subset(). Instead, we + * have to loop over the idattrs and check all of them are in the + * list. */ x = -1; while ((x = bms_next_member(idattrs, x)) >= 0) @@ -440,14 +441,14 @@ pub_collist_contains_invalid_column(Oid pubid, Relation relation, List *ancestor /* * If pubviaroot is true, we are validating the column list of the * parent table, but the bitmap contains the replica identity - * information of the child table. The parent/child attnums may not - * match, so translate them to the parent - get the attname from - * the child, and look it up in the parent. + * information of the child table. The parent/child attnums may + * not match, so translate them to the parent - get the attname + * from the child, and look it up in the parent. 
*/ if (pubviaroot) { /* attribute name in the child table */ - char *colname = get_attname(relid, attnum, false); + char *colname = get_attname(relid, attnum, false); /* * Determine the attnum for the attribute name in parent (we @@ -720,7 +721,7 @@ TransformPubWhereClauses(List *tables, const char *queryString, */ static void CheckPubRelationColumnList(List *tables, const char *queryString, - bool pubviaroot) + bool pubviaroot) { ListCell *lc; @@ -864,7 +865,7 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt) publish_via_partition_root); CheckPubRelationColumnList(rels, pstate->p_sourcetext, - publish_via_partition_root); + publish_via_partition_root); PublicationAddTables(puboid, rels, true, NULL); CloseTableList(rels); @@ -1198,8 +1199,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, HeapTuple tup, /* Transform the int2vector column list to a bitmap. */ columnListDatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple, - Anum_pg_publication_rel_prattrs, - &isnull); + Anum_pg_publication_rel_prattrs, + &isnull); if (!isnull) oldcolumns = pub_collist_to_bitmapset(NULL, columnListDatum, NULL); @@ -1210,15 +1211,15 @@ AlterPublicationTables(AlterPublicationStmt *stmt, HeapTuple tup, foreach(newlc, rels) { PublicationRelInfo *newpubrel; - Oid newrelid; - Bitmapset *newcolumns = NULL; + Oid newrelid; + Bitmapset *newcolumns = NULL; newpubrel = (PublicationRelInfo *) lfirst(newlc); newrelid = RelationGetRelid(newpubrel->relation); /* - * If the new publication has column list, transform it to - * a bitmap too. + * If the new publication has column list, transform it to a + * bitmap too. */ if (newpubrel->columns) { diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c index 54a190722df..2e716743dd6 100644 --- a/src/backend/commands/statscmds.c +++ b/src/backend/commands/statscmds.c @@ -258,9 +258,9 @@ CreateStatistics(CreateStatsStmt *stmt) nattnums++; ReleaseSysCache(atttuple); } - else if (IsA(selem->expr, Var)) /* column reference in parens */ + else if (IsA(selem->expr, Var)) /* column reference in parens */ { - Var *var = (Var *) selem->expr; + Var *var = (Var *) selem->expr; TypeCacheEntry *type; /* Disallow use of system attributes in extended stats */ @@ -297,10 +297,11 @@ CreateStatistics(CreateStatsStmt *stmt) while ((k = bms_next_member(attnums, k)) >= 0) { AttrNumber attnum = k + FirstLowInvalidHeapAttributeNumber; + if (attnum <= 0) ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("statistics creation on system columns is not supported"))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("statistics creation on system columns is not supported"))); } /* @@ -511,9 +512,9 @@ CreateStatistics(CreateStatsStmt *stmt) relation_close(statrel, RowExclusiveLock); /* - * We used to create the pg_statistic_ext_data tuple too, but it's not clear - * what value should the stxdinherit flag have (it depends on whether the rel - * is partitioned, contains data, etc.) + * We used to create the pg_statistic_ext_data tuple too, but it's not + * clear what value should the stxdinherit flag have (it depends on + * whether the rel is partitioned, contains data, etc.) 
*/ InvokeObjectPostCreateHook(StatisticExtRelationId, statoid, 0); diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c index b94236f74d3..690cdaa426e 100644 --- a/src/backend/commands/subscriptioncmds.c +++ b/src/backend/commands/subscriptioncmds.c @@ -1578,13 +1578,13 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel) PG_END_TRY(); /* - * Tell the cumulative stats system that the subscription is getting dropped. - * We can safely report dropping the subscription statistics here if the - * subscription is associated with a replication slot since we cannot run - * DROP SUBSCRIPTION inside a transaction block. Subscription statistics - * will be removed later by (auto)vacuum either if it's not associated - * with a replication slot or if the message for dropping the subscription - * gets lost. + * Tell the cumulative stats system that the subscription is getting + * dropped. We can safely report dropping the subscription statistics here + * if the subscription is associated with a replication slot since we + * cannot run DROP SUBSCRIPTION inside a transaction block. Subscription + * statistics will be removed later by (auto)vacuum either if it's not + * associated with a replication slot or if the message for dropping the + * subscription gets lost. */ if (slotname) pgstat_drop_subscription(subid); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 2cd8546d471..2de0ebacec3 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -495,8 +495,8 @@ static ObjectAddress addFkRecurseReferenced(List **wqueue, Constraint *fkconstra bool old_check_ok, Oid parentDelTrigger, Oid parentUpdTrigger); static void validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums, - int numfksetcols, const int16 *fksetcolsattnums, - List *fksetcols); + int numfksetcols, const int16 *fksetcolsattnums, + List *fksetcols); static void addFkRecurseReferencing(List **wqueue, Constraint *fkconstraint, Relation rel, Relation pkrel, Oid indexOid, Oid parentConstr, int numfks, int16 *pkattnum, int16 *fkattnum, @@ -5579,7 +5579,7 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode, foreach(lc, seqlist) { - Oid seq_relid = lfirst_oid(lc); + Oid seq_relid = lfirst_oid(lc); SequenceChangePersistence(seq_relid, tab->newrelpersistence); } @@ -9448,8 +9448,8 @@ validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums, { for (int i = 0; i < numfksetcols; i++) { - int16 setcol_attnum = fksetcolsattnums[i]; - bool seen = false; + int16 setcol_attnum = fksetcolsattnums[i]; + bool seen = false; for (int j = 0; j < numfks; j++) { @@ -9462,7 +9462,8 @@ validateFkOnDeleteSetColumns(int numfks, const int16 *fkattnums, if (!seen) { - char *col = strVal(list_nth(fksetcols, i)); + char *col = strVal(list_nth(fksetcols, i)); + ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), errmsg("column \"%s\" referenced in ON DELETE SET action must be part of foreign key", col))); @@ -15890,6 +15891,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, CatalogTupleUpdate(pg_index, &pg_index_tuple->t_self, pg_index_tuple); InvokeObjectPostAlterHookArg(IndexRelationId, thisIndexOid, 0, InvalidOid, is_internal); + /* * Invalidate the relcache for the table, so that after we commit * all sessions will refresh the table's replica identity index @@ -17931,12 +17933,12 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd, /* * If the partition we 
just attached is partitioned itself, invalidate * relcache for all descendent partitions too to ensure that their - * rd_partcheck expression trees are rebuilt; partitions already locked - * at the beginning of this function. + * rd_partcheck expression trees are rebuilt; partitions already locked at + * the beginning of this function. */ if (attachrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { - ListCell *l; + ListCell *l; foreach(l, attachrel_children) { @@ -18652,13 +18654,13 @@ DetachPartitionFinalize(Relation rel, Relation partRel, bool concurrent, /* * If the partition we just detached is partitioned itself, invalidate * relcache for all descendent partitions too to ensure that their - * rd_partcheck expression trees are rebuilt; must lock partitions - * before doing so, using the same lockmode as what partRel has been - * locked with by the caller. + * rd_partcheck expression trees are rebuilt; must lock partitions before + * doing so, using the same lockmode as what partRel has been locked with + * by the caller. */ if (partRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { - List *children; + List *children; children = find_all_inheritors(RelationGetRelid(partRel), AccessExclusiveLock, NULL); diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 822d65287ef..690f05f6620 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -89,7 +89,7 @@ char *default_tablespace = NULL; char *temp_tablespaces = NULL; bool allow_in_place_tablespaces = false; -Oid binary_upgrade_next_pg_tablespace_oid = InvalidOid; +Oid binary_upgrade_next_pg_tablespace_oid = InvalidOid; static void create_tablespace_directories(const char *location, const Oid tablespaceoid); diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index c263f6c8b9f..984305ba31c 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -798,11 +798,11 @@ AlterRole(ParseState *pstate, AlterRoleStmt *stmt) */ if (drolemembers) { - List *rolemembers = (List *) drolemembers->arg; + List *rolemembers = (List *) drolemembers->arg; CommandCounterIncrement(); - if (stmt->action == +1) /* add members to role */ + if (stmt->action == +1) /* add members to role */ AddRoleMems(rolename, roleid, rolemembers, roleSpecsToIds(rolemembers), GetUserId(), false); diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index e0fc7e8d794..8df25f59d87 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -1409,7 +1409,7 @@ vac_update_relstats(Relation relation, *frozenxid_updated = false; if (TransactionIdIsNormal(frozenxid) && oldfrozenxid != frozenxid) { - bool update = false; + bool update = false; if (TransactionIdPrecedes(oldfrozenxid, frozenxid)) update = true; @@ -1432,7 +1432,7 @@ vac_update_relstats(Relation relation, *minmulti_updated = false; if (MultiXactIdIsValid(minmulti) && oldminmulti != minmulti) { - bool update = false; + bool update = false; if (MultiXactIdPrecedes(oldminmulti, minmulti)) update = true; diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c index bbf3b69c57e..1753da6c830 100644 --- a/src/backend/commands/vacuumparallel.c +++ b/src/backend/commands/vacuumparallel.c @@ -112,7 +112,7 @@ typedef enum PVIndVacStatus PARALLEL_INDVAC_STATUS_NEED_BULKDELETE, PARALLEL_INDVAC_STATUS_NEED_CLEANUP, PARALLEL_INDVAC_STATUS_COMPLETED -} PVIndVacStatus; +} PVIndVacStatus; /* * Struct for index vacuum statistics of an index that is used for 
parallel vacuum. diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c index 38b94c02767..2831e7978b5 100644 --- a/src/backend/executor/execExpr.c +++ b/src/backend/executor/execExpr.c @@ -2504,7 +2504,7 @@ ExecInitExprRec(Expr *node, ExprState *state, if (ctor->type == JSCTOR_JSON_SCALAR) { bool is_jsonb = - ctor->returning->format->format_type == JS_FORMAT_JSONB; + ctor->returning->format->format_type == JS_FORMAT_JSONB; scratch.d.json_constructor.arg_type_cache = palloc(sizeof(*scratch.d.json_constructor.arg_type_cache) * nargs); @@ -2666,7 +2666,7 @@ ExecInitExprRec(Expr *node, ExprState *state, { cstate->coercion = *coercion; cstate->estate = *coercion ? - ExecInitExprWithCaseValue((Expr *)(*coercion)->expr, + ExecInitExprWithCaseValue((Expr *) (*coercion)->expr, state->parent, caseval, casenull) : NULL; } diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c index 3b1c045c52f..e024611aa54 100644 --- a/src/backend/executor/execExprInterp.c +++ b/src/backend/executor/execExprInterp.c @@ -3978,8 +3978,8 @@ ExecEvalJsonIsPredicate(ExprState *state, ExprEvalStep *op) } /* - * Do full parsing pass only for uniqueness check or for - * JSON text validation. + * Do full parsing pass only for uniqueness check or for JSON text + * validation. */ if (res && (pred->unique_keys || exprtype == TEXTOID)) res = json_validate(json, pred->unique_keys, false); @@ -4513,20 +4513,20 @@ ExecEvalJsonConstructor(ExprState *state, ExprEvalStep *op, if (ctor->type == JSCTOR_JSON_ARRAY) res = (is_jsonb ? jsonb_build_array_worker : - json_build_array_worker)(op->d.json_constructor.nargs, - op->d.json_constructor.arg_values, - op->d.json_constructor.arg_nulls, - op->d.json_constructor.arg_types, - op->d.json_constructor.constructor->absent_on_null); - else if (ctor->type == JSCTOR_JSON_OBJECT) - res = (is_jsonb ? - jsonb_build_object_worker : - json_build_object_worker)(op->d.json_constructor.nargs, + json_build_array_worker) (op->d.json_constructor.nargs, op->d.json_constructor.arg_values, op->d.json_constructor.arg_nulls, op->d.json_constructor.arg_types, - op->d.json_constructor.constructor->absent_on_null, - op->d.json_constructor.constructor->unique); + op->d.json_constructor.constructor->absent_on_null); + else if (ctor->type == JSCTOR_JSON_OBJECT) + res = (is_jsonb ? + jsonb_build_object_worker : + json_build_object_worker) (op->d.json_constructor.nargs, + op->d.json_constructor.arg_values, + op->d.json_constructor.arg_nulls, + op->d.json_constructor.arg_types, + op->d.json_constructor.constructor->absent_on_null, + op->d.json_constructor.constructor->unique); else if (ctor->type == JSCTOR_JSON_SCALAR) { if (op->d.json_constructor.arg_nulls[0]) @@ -4622,9 +4622,9 @@ static Datum ExecEvalJsonExprCoercion(ExprEvalStep *op, ExprContext *econtext, Datum res, bool *isNull, void *p, bool *error) { - ExprState *estate = p; + ExprState *estate = p; - if (estate) /* coerce using specified expression */ + if (estate) /* coerce using specified expression */ return ExecEvalExpr(estate, econtext, isNull); if (op->d.jsonexpr.jsexpr->op != JSON_EXISTS_OP) @@ -4696,7 +4696,7 @@ EvalJsonPathVar(void *cxt, char *varName, int varNameLen, if (!var->evaluated) { MemoryContext oldcxt = var->mcxt ? 
- MemoryContextSwitchTo(var->mcxt) : NULL; + MemoryContextSwitchTo(var->mcxt) : NULL; var->value = ExecEvalExpr(var->estate, var->econtext, &var->isnull); var->evaluated = true; @@ -4751,9 +4751,8 @@ ExecPrepareJsonItemCoercion(JsonbValue *item, case jbvString: coercion = &coercions->string; - res = PointerGetDatum( - cstring_to_text_with_len(item->val.string.val, - item->val.string.len)); + res = PointerGetDatum(cstring_to_text_with_len(item->val.string.val, + item->val.string.len)); break; case jbvNumeric: @@ -4809,8 +4808,8 @@ ExecPrepareJsonItemCoercion(JsonbValue *item, return res; } -typedef Datum (*JsonFunc)(ExprEvalStep *op, ExprContext *econtext, - Datum item, bool *resnull, void *p, bool *error); +typedef Datum (*JsonFunc) (ExprEvalStep *op, ExprContext *econtext, + Datum item, bool *resnull, void *p, bool *error); static Datum ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op, @@ -4826,8 +4825,8 @@ ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op, return func(op, econtext, res, resnull, p, error); /* - * We should catch exceptions of category ERRCODE_DATA_EXCEPTION - * and execute the corresponding ON ERROR behavior then. + * We should catch exceptions of category ERRCODE_DATA_EXCEPTION and + * execute the corresponding ON ERROR behavior then. */ oldcontext = CurrentMemoryContext; oldowner = CurrentResourceOwner; @@ -4864,7 +4863,8 @@ ExecEvalJsonExprSubtrans(JsonFunc func, ExprEvalStep *op, ecategory = ERRCODE_TO_CATEGORY(edata->sqlerrcode); - if (ecategory != ERRCODE_DATA_EXCEPTION && /* jsonpath and other data errors */ + if (ecategory != ERRCODE_DATA_EXCEPTION && /* jsonpath and other data + * errors */ ecategory != ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION) /* domain errors */ ReThrowError(edata); @@ -4918,7 +4918,7 @@ ExecEvalJsonExpr(ExprEvalStep *op, ExprContext *econtext, if (error && *error) return (Datum) 0; - if (!jbv) /* NULL or empty */ + if (!jbv) /* NULL or empty */ break; Assert(!empty); @@ -4949,21 +4949,23 @@ ExecEvalJsonExpr(ExprEvalStep *op, ExprContext *econtext, *error = true; return (Datum) 0; } + /* * Coercion via I/O means here that the cast to the target * type simply does not exist. */ ereport(ERROR, - /* - * XXX Standard says about a separate error code - * ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE - * but does not define its number. - */ + + /* + * XXX Standard says about a separate error code + * ERRCODE_SQL_JSON_ITEM_CANNOT_BE_CAST_TO_TARGET_TYPE but + * does not define its number. + */ (errcode(ERRCODE_SQL_JSON_SCALAR_REQUIRED), errmsg("SQL/JSON item cannot be cast to target type"))); } else if (!jcstate->estate) - return res; /* no coercion */ + return res; /* no coercion */ /* coerce using specific expression */ estate = jcstate->estate; @@ -5018,6 +5020,7 @@ ExecEvalJsonExpr(ExprEvalStep *op, ExprContext *econtext, } if (jexpr->on_empty->btype == JSON_BEHAVIOR_DEFAULT) + /* * Execute DEFAULT expression as a coercion expression, because * its result is already coerced to the target type. 
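The ExecEvalJsonExprSubtrans hunks above reflow the comment about catching errors of category ERRCODE_DATA_EXCEPTION; the control flow they describe is the backend's standard subtransaction guard. Below is a minimal sketch of that pattern, not the executor's actual code: EvalCallback and eval_with_subtransaction are hypothetical names introduced for illustration, while the error-category test mirrors the one visible in the hunk above.

#include "postgres.h"
#include "access/xact.h"
#include "utils/resowner.h"

/* hypothetical callback type, standing in for the real evaluator */
typedef Datum (*EvalCallback) (void *arg, bool *isnull);

static Datum
eval_with_subtransaction(EvalCallback eval_cb, void *arg,
						 bool *isnull, bool *error)
{
	MemoryContext oldcontext = CurrentMemoryContext;
	ResourceOwner oldowner = CurrentResourceOwner;
	Datum		res = (Datum) 0;

	BeginInternalSubTransaction(NULL);
	/* run the callback in the caller's memory context, not the subxact's */
	MemoryContextSwitchTo(oldcontext);

	PG_TRY();
	{
		res = eval_cb(arg, isnull);

		/* success: commit the subtransaction and restore state */
		ReleaseCurrentSubTransaction();
		MemoryContextSwitchTo(oldcontext);
		CurrentResourceOwner = oldowner;
	}
	PG_CATCH();
	{
		ErrorData  *edata;

		/* save error info outside ErrorContext, then clean up */
		MemoryContextSwitchTo(oldcontext);
		edata = CopyErrorData();
		FlushErrorState();

		RollbackAndReleaseCurrentSubTransaction();
		MemoryContextSwitchTo(oldcontext);
		CurrentResourceOwner = oldowner;

		/* only data exceptions and constraint violations select ON ERROR */
		if (ERRCODE_TO_CATEGORY(edata->sqlerrcode) != ERRCODE_DATA_EXCEPTION &&
			ERRCODE_TO_CATEGORY(edata->sqlerrcode) != ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION)
			ReThrowError(edata);

		FreeErrorData(edata);
		*error = true;
		*isnull = true;
	}
	PG_END_TRY();

	return res;
}

The two details worth noting are that CopyErrorData() must be called after switching out of ErrorContext, and that any error outside those two categories is rethrown rather than converted into the ON ERROR behavior.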
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 90b2699a96b..5ef5c6930fd 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -575,6 +575,7 @@ ExecReScanIndexScan(IndexScanState *node) if (node->iss_ReorderQueue) { HeapTuple tuple; + while (!pairingheap_is_empty(node->iss_ReorderQueue)) { tuple = reorderqueue_pop(node); diff --git a/src/backend/executor/nodeMemoize.c b/src/backend/executor/nodeMemoize.c index 23441e33cad..f7be4fc31f7 100644 --- a/src/backend/executor/nodeMemoize.c +++ b/src/backend/executor/nodeMemoize.c @@ -375,7 +375,7 @@ static void cache_purge_all(MemoizeState *mstate) { uint64 evictions = mstate->hashtable->members; - PlanState *pstate = (PlanState *) mstate; + PlanState *pstate = (PlanState *) mstate; /* * Likely the most efficient way to remove all items is to just reset the diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 982acfdad98..a49c3da5b6c 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -831,7 +831,7 @@ ExecInsert(ModifyTableContext *context, { TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor); TupleDesc plan_tdesc = - CreateTupleDescCopy(planSlot->tts_tupleDescriptor); + CreateTupleDescCopy(planSlot->tts_tupleDescriptor); resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] = MakeSingleTupleTableSlot(tdesc, slot->tts_ops); diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 042a5f8b0a2..29bc26669b0 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -2267,10 +2267,10 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan) else { stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree, - src, - plan->argtypes, - plan->nargs, - _SPI_current->queryEnv); + src, + plan->argtypes, + plan->nargs, + _SPI_current->queryEnv); } /* Finish filling in the CachedPlanSource */ @@ -2504,10 +2504,10 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options, else { stmt_list = pg_analyze_and_rewrite_fixedparams(parsetree, - src, - plan->argtypes, - plan->nargs, - _SPI_current->queryEnv); + src, + plan->argtypes, + plan->nargs, + _SPI_current->queryEnv); } /* Finish filling in the CachedPlanSource */ diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c index fcd63218f28..6c72d43beb6 100644 --- a/src/backend/jit/llvm/llvmjit.c +++ b/src/backend/jit/llvm/llvmjit.c @@ -890,8 +890,8 @@ llvm_shutdown(int code, Datum arg) * has occurred in the middle of LLVM code. It is not safe to call back * into LLVM (which is why a FATAL error was thrown). * - * We do need to shutdown LLVM in other shutdown cases, otherwise - * e.g. profiling data won't be written out. + * We do need to shutdown LLVM in other shutdown cases, otherwise e.g. + * profiling data won't be written out. */ if (llvm_in_fatal_on_oom()) { diff --git a/src/backend/lib/dshash.c b/src/backend/lib/dshash.c index 1b94a76e43e..ec454b4d655 100644 --- a/src/backend/lib/dshash.c +++ b/src/backend/lib/dshash.c @@ -634,9 +634,9 @@ dshash_seq_next(dshash_seq_status *status) /* * Not yet holding any partition locks. Need to determine the size of the - * hash table, it could have been resized since we were looking - * last. Since we iterate in partition order, we can start by - * unconditionally lock partition 0. + * hash table, it could have been resized since we were looking last. 
+ * Since we iterate in partition order, we can start by unconditionally + * locking partition 0. * * Once we hold the lock, no resizing can happen until the scan ends. So * we don't need to repeatedly call ensure_valid_bucket_pointers(). diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 03cdc72b406..75392a8bb7c 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -1967,8 +1967,8 @@ retry: * because no code should expect latches to survive across * CHECK_FOR_INTERRUPTS(). */ - ResetLatch(MyLatch); - goto retry; + ResetLatch(MyLatch); + goto retry; } } diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 836f427ea8f..205506305b0 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -2343,7 +2343,7 @@ _copyJsonReturning(const JsonReturning *from) static JsonValueExpr * _copyJsonValueExpr(const JsonValueExpr *from) { - JsonValueExpr *newnode = makeNode(JsonValueExpr); + JsonValueExpr *newnode = makeNode(JsonValueExpr); COPY_NODE_FIELD(raw_expr); COPY_NODE_FIELD(formatted_expr); @@ -2358,7 +2358,7 @@ _copyJsonValueExpr(const JsonValueExpr *from) static JsonParseExpr * _copyJsonParseExpr(const JsonParseExpr *from) { - JsonParseExpr *newnode = makeNode(JsonParseExpr); + JsonParseExpr *newnode = makeNode(JsonParseExpr); COPY_NODE_FIELD(expr); COPY_NODE_FIELD(output); @@ -2488,7 +2488,7 @@ _copyJsonObjectAgg(const JsonObjectAgg *from) static JsonOutput * _copyJsonOutput(const JsonOutput *from) { - JsonOutput *newnode = makeNode(JsonOutput); + JsonOutput *newnode = makeNode(JsonOutput); COPY_NODE_FIELD(typeName); COPY_NODE_FIELD(returning); @@ -2550,7 +2550,7 @@ _copyJsonArrayQueryConstructor(const JsonArrayQueryConstructor *from) static JsonExpr * _copyJsonExpr(const JsonExpr *from) { - JsonExpr *newnode = makeNode(JsonExpr); + JsonExpr *newnode = makeNode(JsonExpr); COPY_SCALAR_FIELD(op); COPY_NODE_FIELD(formatted_expr); @@ -2614,7 +2614,7 @@ _copyJsonItemCoercions(const JsonItemCoercions *from) static JsonFuncExpr * _copyJsonFuncExpr(const JsonFuncExpr *from) { - JsonFuncExpr *newnode = makeNode(JsonFuncExpr); + JsonFuncExpr *newnode = makeNode(JsonFuncExpr); COPY_SCALAR_FIELD(op); COPY_NODE_FIELD(common); @@ -2651,7 +2651,7 @@ _copyJsonIsPredicate(const JsonIsPredicate *from) static JsonBehavior * _copyJsonBehavior(const JsonBehavior *from) { - JsonBehavior *newnode = makeNode(JsonBehavior); + JsonBehavior *newnode = makeNode(JsonBehavior); COPY_SCALAR_FIELD(btype); COPY_NODE_FIELD(default_expr); @@ -2665,7 +2665,7 @@ _copyJsonBehavior(const JsonBehavior *from) static JsonCommon * _copyJsonCommon(const JsonCommon *from) { - JsonCommon *newnode = makeNode(JsonCommon); + JsonCommon *newnode = makeNode(JsonCommon); COPY_NODE_FIELD(expr); COPY_NODE_FIELD(pathspec); @@ -2682,7 +2682,7 @@ _copyJsonCommon(const JsonCommon *from) static JsonArgument * _copyJsonArgument(const JsonArgument *from) { - JsonArgument *newnode = makeNode(JsonArgument); + JsonArgument *newnode = makeNode(JsonArgument); COPY_NODE_FIELD(val); COPY_STRING_FIELD(name); @@ -2696,7 +2696,7 @@ _copyJsonArgument(const JsonArgument *from) static JsonTable * _copyJsonTable(const JsonTable *from) { - JsonTable *newnode = makeNode(JsonTable); + JsonTable *newnode = makeNode(JsonTable); COPY_NODE_FIELD(common); COPY_NODE_FIELD(columns); @@ -5480,7 +5480,7 @@ _copyExtensibleNode(const ExtensibleNode *from) static Integer * _copyInteger(const Integer *from) { - Integer *newnode = makeNode(Integer); + Integer *newnode = makeNode(Integer);
COPY_SCALAR_FIELD(ival); @@ -5500,7 +5500,7 @@ _copyFloat(const Float *from) static Boolean * _copyBoolean(const Boolean *from) { - Boolean *newnode = makeNode(Boolean); + Boolean *newnode = makeNode(Boolean); COPY_SCALAR_FIELD(boolval); @@ -5520,7 +5520,7 @@ _copyString(const String *from) static BitString * _copyBitString(const BitString *from) { - BitString *newnode = makeNode(BitString); + BitString *newnode = makeNode(BitString); COPY_STRING_FIELD(bsval); diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index e013c1bbfed..9688b22a4b9 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -2802,8 +2802,7 @@ static bool _equalA_Const(const A_Const *a, const A_Const *b) { /* - * Hack for in-line val field. Also val is not valid is isnull is - * true. + * Hack for in-line val field. Also val is not valid if isnull is true. */ if (!a->isnull && !b->isnull && !equal(&a->val, &b->val)) diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index 4ae5e5d4dd6..3b3ef3a4cdd 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -1003,7 +1003,7 @@ exprCollation(const Node *expr) break; case T_JsonExpr: { - JsonExpr *jexpr = (JsonExpr *) expr; + JsonExpr *jexpr = (JsonExpr *) expr; JsonCoercion *coercion = jexpr->result_coercion; if (!coercion) @@ -1239,7 +1239,8 @@ exprSetCollation(Node *expr, Oid collation) if (ctor->coercion) exprSetCollation((Node *) ctor->coercion, collation); else - Assert(!OidIsValid(collation)); /* result is always a json[b] type */ + Assert(!OidIsValid(collation)); /* result is always a + * json[b] type */ } break; case T_JsonIsPredicate: @@ -1247,7 +1248,7 @@ exprSetCollation(Node *expr, Oid collation) break; case T_JsonExpr: { - JsonExpr *jexpr = (JsonExpr *) expr; + JsonExpr *jexpr = (JsonExpr *) expr; JsonCoercion *coercion = jexpr->result_coercion; if (!coercion) @@ -2496,7 +2497,7 @@ expression_tree_walker(Node *node, return walker(((JsonIsPredicate *) node)->expr, context); case T_JsonExpr: { - JsonExpr *jexpr = (JsonExpr *) node; + JsonExpr *jexpr = (JsonExpr *) node; if (walker(jexpr->formatted_expr, context)) return true; @@ -3568,8 +3569,8 @@ expression_tree_mutator(Node *node, break; case T_JsonExpr: { - JsonExpr *jexpr = (JsonExpr *) node; - JsonExpr *newnode; + JsonExpr *jexpr = (JsonExpr *) node; + JsonExpr *newnode; FLATCOPY(newnode, jexpr, JsonExpr); MUTATE(newnode->path_spec, jexpr->path_spec, Node *); @@ -4545,7 +4546,7 @@ raw_expression_tree_walker(Node *node, break; case T_JsonTableColumn: { - JsonTableColumn *jtc = (JsonTableColumn *) node; + JsonTableColumn *jtc = (JsonTableColumn *) node; if (walker(jtc->typeName, context)) return true; diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index b1f2de8b28d..0271ea9d786 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -3613,8 +3613,8 @@ static void _outFloat(StringInfo str, const Float *node) { /* - * We assume the value is a valid numeric literal and so does not - * need quoting. + * We assume the value is a valid numeric literal and so does not need + * quoting. */ appendStringInfoString(str, node->fval); } @@ -3629,8 +3629,8 @@ static void _outString(StringInfo str, const String *node) { /* - * We use outToken to provide escaping of the string's content, - * but we don't want it to do anything with an empty string. + * We use outToken to provide escaping of the string's content, but we + * don't want it to do anything with an empty string.
*/ appendStringInfoChar(str, '"'); if (node->sval[0] != '\0') diff --git a/src/backend/nodes/value.c b/src/backend/nodes/value.c index 6fe55f5dd5c..5774a686706 100644 --- a/src/backend/nodes/value.c +++ b/src/backend/nodes/value.c @@ -22,7 +22,7 @@ Integer * makeInteger(int i) { - Integer *v = makeNode(Integer); + Integer *v = makeNode(Integer); v->ival = i; return v; @@ -48,7 +48,7 @@ makeFloat(char *numericStr) Boolean * makeBoolean(bool val) { - Boolean *v = makeNode(Boolean); + Boolean *v = makeNode(Boolean); v->boolval = val; return v; diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index d84f66a81b3..7ac116a791f 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -1777,17 +1777,18 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, } /* - * When building a fractional path, determine a cheapest fractional - * path for each child relation too. Looking at startup and total - * costs is not enough, because the cheapest fractional path may be - * dominated by two separate paths (one for startup, one for total). + * When building a fractional path, determine a cheapest + * fractional path for each child relation too. Looking at startup + * and total costs is not enough, because the cheapest fractional + * path may be dominated by two separate paths (one for startup, + * one for total). * * When needed (building fractional path), determine the cheapest * fractional path too. */ if (root->tuple_fraction > 0) { - double path_fraction = (1.0 / root->tuple_fraction); + double path_fraction = (1.0 / root->tuple_fraction); cheapest_fractional = get_cheapest_fractional_path_for_pathkeys(childrel->pathlist, @@ -1796,8 +1797,8 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, path_fraction); /* - * If we found no path with matching pathkeys, use the cheapest - * total path instead. + * If we found no path with matching pathkeys, use the + * cheapest total path instead. * * XXX We might consider partially sorted paths too (with an * incremental sort on top). But we'd have to build all the diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 6673d271c26..ed98ba7dbd2 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -1794,7 +1794,7 @@ is_fake_var(Expr *expr) static double get_width_cost_multiplier(PlannerInfo *root, Expr *expr) { - double width = -1.0; /* fake value */ + double width = -1.0; /* fake value */ if (IsA(expr, RelabelType)) expr = (Expr *) ((RelabelType *) expr)->arg; @@ -1802,17 +1802,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr) /* Try to find actual stat in corresponding relation */ if (IsA(expr, Var)) { - Var *var = (Var *) expr; + Var *var = (Var *) expr; if (var->varno > 0 && var->varno < root->simple_rel_array_size) { - RelOptInfo *rel = root->simple_rel_array[var->varno]; + RelOptInfo *rel = root->simple_rel_array[var->varno]; if (rel != NULL && var->varattno >= rel->min_attr && var->varattno <= rel->max_attr) { - int ndx = var->varattno - rel->min_attr; + int ndx = var->varattno - rel->min_attr; if (rel->attr_widths[ndx] > 0) width = rel->attr_widths[ndx]; @@ -1823,7 +1823,7 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr) /* Didn't find any actual stats, try using type width instead. 
*/ if (width < 0.0) { - Node *node = (Node*) expr; + Node *node = (Node *) expr; width = get_typavgwidth(exprType(node), exprTypmod(node)); } @@ -1832,17 +1832,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr) * Values are passed as Datum type, so comparisons can't be cheaper than * comparing a Datum value. * - * FIXME I find this reasoning questionable. We may pass int2, and comparing - * it is probably a bit cheaper than comparing a bigint. + * FIXME I find this reasoning questionable. We may pass int2, and + * comparing it is probably a bit cheaper than comparing a bigint. */ if (width <= sizeof(Datum)) return 1.0; /* * We consider the cost of a comparison not to be directly proportional to - * width of the argument, because widths of the arguments could be slightly - * different (we only know the average width for the whole column). So we - * use log16(width) as an estimate. + * width of the argument, because widths of the arguments could be + * slightly different (we only know the average width for the whole + * column). So we use log16(width) as an estimate. */ return 1.0 + 0.125 * LOG2(width / sizeof(Datum)); } @@ -1902,23 +1902,23 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, bool heapSort) { Cost per_tuple_cost = 0.0; - ListCell *lc; - List *pathkeyExprs = NIL; + ListCell *lc; + List *pathkeyExprs = NIL; double tuplesPerPrevGroup = tuples; double totalFuncCost = 1.0; bool has_fake_var = false; int i = 0; Oid prev_datatype = InvalidOid; - List *cache_varinfos = NIL; + List *cache_varinfos = NIL; /* fallback if pathkeys is unknown */ if (list_length(pathkeys) == 0) { /* - * If we'll use a bounded heap-sort keeping just K tuples in memory, for - * a total number of tuple comparisons of N log2 K; but the constant - * factor is a bit higher than for quicksort. Tweak it so that the cost - * curve is continuous at the crossover point. + * If we'll use a bounded heap-sort keeping just K tuples in memory, + * for a total number of tuple comparisons of N log2 K; but the + * constant factor is a bit higher than for quicksort. Tweak it so + * that the cost curve is continuous at the crossover point. */ output_tuples = (heapSort) ? 2.0 * output_tuples : tuples; per_tuple_cost += 2.0 * cpu_operator_cost * LOG2(output_tuples); @@ -1930,17 +1930,17 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, } /* - * Computing total cost of sorting takes into account: - * - per column comparison function cost - * - we try to compute needed number of comparison per column + * Computing total cost of sorting takes into account the per-column + * comparison function cost. We try to compute the needed number of + * comparisons per column. */ foreach(lc, pathkeys) { - PathKey *pathkey = (PathKey*) lfirst(lc); - EquivalenceMember *em; - double nGroups, - correctedNGroups; - Cost funcCost = 1.0; + PathKey *pathkey = (PathKey *) lfirst(lc); + EquivalenceMember *em; + double nGroups, + correctedNGroups; + Cost funcCost = 1.0; /* * We believe that equivalence members aren't very different, so, to @@ -1985,10 +1985,10 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, pathkeyExprs = lappend(pathkeyExprs, em->em_expr); /* - * We need to calculate the number of comparisons for this column, which - * requires knowing the group size. So we estimate the number of groups - * by calling estimate_num_groups_incremental(), which estimates the - * group size for "new" pathkeys. 
+ * We need to calculate the number of comparisons for this column, + * which requires knowing the group size. So we estimate the number of + * groups by calling estimate_num_groups_incremental(), which + * estimates the group size for "new" pathkeys. * * Note: estimate_num_groups_incremental does not handle fake Vars, so * use a default estimate otherwise. @@ -1999,26 +1999,30 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, &cache_varinfos, list_length(pathkeyExprs) - 1); else if (tuples > 4.0) + /* * Use geometric mean as estimation if there are no stats. * - * We don't use DEFAULT_NUM_DISTINCT here, because that’s used for - * a single column, but here we’re dealing with multiple columns. + * We don't use DEFAULT_NUM_DISTINCT here, because that's used for + * a single column, but here we're dealing with multiple columns. */ nGroups = ceil(2.0 + sqrt(tuples) * (i + 1) / list_length(pathkeys)); else nGroups = tuples; /* - * Presorted keys are not considered in the cost above, but we still do - * have to compare them in the qsort comparator. So make sure to factor - * in the cost in that case. + * Presorted keys are not considered in the cost above, but we still + * do have to compare them in the qsort comparator. So make sure to + * factor in the cost in that case. */ if (i >= nPresortedKeys) { if (heapSort) { - /* have to keep at least one group, and a multiple of group size */ + /* + * have to keep at least one group, and a multiple of group + * size + */ correctedNGroups = ceil(output_tuples / tuplesPerPrevGroup); } else @@ -2033,19 +2037,20 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, i++; /* - * Uniform distributions with all groups being of the same size are the - * best case, with nice smooth behavior. Real-world distributions tend - * not to be uniform, though, and we don’t have any reliable easy-to-use - * information. As a basic defense against skewed distributions, we use - * a 1.5 factor to make the expected group a bit larger, but we need to - * be careful not to make the group larger than in the preceding step. + * Uniform distributions with all groups being of the same size are + * the best case, with nice smooth behavior. Real-world distributions + * tend not to be uniform, though, and we don't have any reliable + * easy-to-use information. As a basic defense against skewed + * distributions, we use a 1.5 factor to make the expected group a bit + * larger, but we need to be careful not to make the group larger than + * in the preceding step. */ tuplesPerPrevGroup = Min(tuplesPerPrevGroup, ceil(1.5 * tuplesPerPrevGroup / nGroups)); /* - * Once we get single-row group, it means tuples in the group are unique - * and we can skip all remaining columns. + * Once we get single-row group, it means tuples in the group are + * unique and we can skip all remaining columns. */ if (tuplesPerPrevGroup <= 1.0) break; @@ -2057,15 +2062,15 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, per_tuple_cost *= cpu_operator_cost; /* - * Accordingly to "Introduction to algorithms", Thomas H. Cormen, Charles E. - * Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0, quicksort estimation - * formula has additional term proportional to number of tuples (See Chapter - * 8.2 and Theorem 4.1). That affects cases with a low number of tuples, - * approximately less than 1e4. 
We could implement it as an additional - * multiplier under the logarithm, but we use a bit more complex formula - * which takes into account the number of unique tuples and it’s not clear - * how to combine the multiplier with the number of groups. Estimate it as - * 10 in cpu_operator_cost unit. + * According to "Introduction to algorithms", Thomas H. Cormen, Charles + * E. Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0, the quicksort + * estimation formula has an additional term proportional to the number of + * tuples (see Chapter 8.2 and Theorem 4.1). That affects cases with a low + * number of tuples, approximately less than 1e4. We could implement it as + * an additional multiplier under the logarithm, but we use a bit more + * complex formula which takes into account the number of unique tuples + * and it's not clear how to combine the multiplier with the number of + * groups. Estimate it as 10 cpu_operator_cost units. */ per_tuple_cost += 10 * cpu_operator_cost; @@ -2082,7 +2087,7 @@ cost_sort_estimate(PlannerInfo *root, List *pathkeys, int nPresortedKeys, double tuples) { return compute_cpu_sort_cost(root, pathkeys, nPresortedKeys, - 0, tuples, tuples, false); + 0, tuples, tuples, false); } /* diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c index 34c5ab1cb60..60c0e3f1089 100644 --- a/src/backend/optimizer/path/equivclass.c +++ b/src/backend/optimizer/path/equivclass.c @@ -685,9 +685,9 @@ get_eclass_for_sort_expr(PlannerInfo *root, /* * Match! * - * Copy the sortref if it wasn't set yet. That may happen if the - * ec was constructed from WHERE clause, i.e. it doesn't have a - * target reference at all. + * Copy the sortref if it wasn't set yet. That may happen if + * the ec was constructed from WHERE clause, i.e. it doesn't + * have a target reference at all. */ if (cur_ec->ec_sortref == 0 && sortref > 0) cur_ec->ec_sortref = sortref; diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index 9a8c5165b04..55206ec54d2 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -1258,7 +1258,7 @@ sort_inner_and_outer(PlannerInfo *root, foreach(l, all_pathkeys) { - PathKey *front_pathkey = (PathKey *) lfirst(l); + PathKey *front_pathkey = (PathKey *) lfirst(l); List *cur_mergeclauses; List *outerkeys; List *innerkeys; diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c index 91556910aec..9775c4a7225 100644 --- a/src/backend/optimizer/path/pathkeys.c +++ b/src/backend/optimizer/path/pathkeys.c @@ -32,7 +32,7 @@ #include "utils/selfuncs.h" /* Consider reordering of GROUP BY keys? */ -bool enable_group_by_reordering = true; +bool enable_group_by_reordering = true; static bool pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys); static bool matches_boolean_partition_clause(RestrictInfo *rinfo, @@ -352,7 +352,7 @@ int group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys, List **group_clauses) { - List *new_group_pathkeys= NIL, + List *new_group_pathkeys = NIL, *new_group_clauses = NIL; ListCell *lc; int n; @@ -365,16 +365,16 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys, * there's a matching GROUP BY key. If we find one, we append it to the * list, and do the same for the clauses.
* - * Once we find the first pathkey without a matching GROUP BY key, the rest - * of the pathkeys are useless and can't be used to evaluate the grouping, - * so we abort the loop and ignore the remaining pathkeys. + * Once we find the first pathkey without a matching GROUP BY key, the + * rest of the pathkeys are useless and can't be used to evaluate the + * grouping, so we abort the loop and ignore the remaining pathkeys. * * XXX Pathkeys are built in a way to allow simply comparing pointers. */ foreach(lc, pathkeys) { - PathKey *pathkey = (PathKey *) lfirst(lc); - SortGroupClause *sgc; + PathKey *pathkey = (PathKey *) lfirst(lc); + SortGroupClause *sgc; /* abort on first mismatch */ if (!list_member_ptr(*group_pathkeys, pathkey)) @@ -403,13 +403,14 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys, /* * Used to generate all permutations of a pathkey list. */ -typedef struct PathkeyMutatorState { +typedef struct PathkeyMutatorState +{ List *elemsList; ListCell **elemCells; void **elems; int *positions; - int mutatorNColumns; - int count; + int mutatorNColumns; + int count; } PathkeyMutatorState; @@ -428,9 +429,9 @@ typedef struct PathkeyMutatorState { static void PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end) { - int i; + int i; int n = end - start; - ListCell *lc; + ListCell *lc; memset(state, 0, sizeof(*state)); @@ -438,8 +439,8 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end) state->elemsList = list_copy(elems); - state->elems = palloc(sizeof(void*) * n); - state->elemCells = palloc(sizeof(ListCell*) * n); + state->elems = palloc(sizeof(void *) * n); + state->elemCells = palloc(sizeof(ListCell *) * n); state->positions = palloc(sizeof(int) * n); i = 0; @@ -459,10 +460,10 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end) static void PathkeyMutatorSwap(int *a, int i, int j) { - int s = a[i]; + int s = a[i]; - a[i] = a[j]; - a[j] = s; + a[i] = a[j]; + a[j] = s; } /* @@ -471,7 +472,10 @@ PathkeyMutatorSwap(int *a, int i, int j) static bool PathkeyMutatorNextSet(int *a, int n) { - int j, k, l, r; + int j, + k, + l, + r; j = n - 2; @@ -507,7 +511,7 @@ PathkeyMutatorNextSet(int *a, int n) static List * PathkeyMutatorNext(PathkeyMutatorState *state) { - int i; + int i; state->count++; @@ -528,9 +532,9 @@ PathkeyMutatorNext(PathkeyMutatorState *state) } /* update the list cells to point to the right elements */ - for(i = 0; i < state->mutatorNColumns; i++) + for (i = 0; i < state->mutatorNColumns; i++) lfirst(state->elemCells[i]) = - (void *) state->elems[ state->positions[i] - 1 ]; + (void *) state->elems[state->positions[i] - 1]; return state->elemsList; } @@ -541,7 +545,7 @@ PathkeyMutatorNext(PathkeyMutatorState *state) typedef struct PathkeySortCost { Cost cost; - PathKey *pathkey; + PathKey *pathkey; } PathkeySortCost; static int @@ -581,41 +585,42 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows, List **group_pathkeys, List **group_clauses, int n_preordered) { - List *new_group_pathkeys = NIL, - *new_group_clauses = NIL, - *var_group_pathkeys; + List *new_group_pathkeys = NIL, + *new_group_clauses = NIL, + *var_group_pathkeys; - ListCell *cell; - PathkeyMutatorState mstate; - double cheapest_sort_cost = -1.0; + ListCell *cell; + PathkeyMutatorState mstate; + double cheapest_sort_cost = -1.0; - int nFreeKeys; - int nToPermute; + int nFreeKeys; + int nToPermute; /* If there are less than 2 unsorted pathkeys, we're done. 
*/ if (list_length(*group_pathkeys) - n_preordered < 2) return false; /* - * We could exhaustively cost all possible orderings of the pathkeys, but for - * a large number of pathkeys it might be prohibitively expensive. So we try - * to apply simple cheap heuristics first - we sort the pathkeys by sort cost - * (as if the pathkey was sorted independently) and then check only the four - * cheapest pathkeys. The remaining pathkeys are kept ordered by cost. + * We could exhaustively cost all possible orderings of the pathkeys, but + * for a large number of pathkeys it might be prohibitively expensive. So + * we try to apply simple cheap heuristics first - we sort the pathkeys by + * sort cost (as if the pathkey was sorted independently) and then check + * only the four cheapest pathkeys. The remaining pathkeys are kept + * ordered by cost. * * XXX This is a very simple heuristic, but likely to work fine for most - * cases (because the number of GROUP BY clauses tends to be lower than 4). - * But it ignores how the number of distinct values in each pathkey affects - * the following steps. It might be better to use "more expensive" pathkey - * first if it has many distinct values, because it then limits the number - * of comparisons for the remaining pathkeys. But evaluating that is likely - * quite the expensive. + * cases (because the number of GROUP BY clauses tends to be lower than + * 4). But it ignores how the number of distinct values in each pathkey + * affects the following steps. It might be better to use a "more expensive" + * pathkey first if it has many distinct values, because it then limits + * the number of comparisons for the remaining pathkeys. But evaluating + * that is likely quite expensive. */ nFreeKeys = list_length(*group_pathkeys) - n_preordered; nToPermute = 4; if (nFreeKeys > nToPermute) { - int i; + int i; PathkeySortCost *costs = palloc(sizeof(PathkeySortCost) * nFreeKeys); /* skip the pre-ordered pathkeys */ @@ -624,7 +629,7 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows, /* estimate cost for sorting individual pathkeys */ for (i = 0; cell != NULL; i++, (cell = lnext(*group_pathkeys, cell))) { - List *to_cost = list_make1(lfirst(cell)); + List *to_cost = list_make1(lfirst(cell)); Assert(i < nFreeKeys); @@ -658,28 +663,29 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows, Assert(list_length(new_group_pathkeys) == list_length(*group_pathkeys)); /* - * Generate pathkey lists with permutations of the first nToPermute pathkeys. + * Generate pathkey lists with permutations of the first nToPermute + * pathkeys. * * XXX We simply calculate sort cost for each individual pathkey list, but - * there's room for two dynamic programming optimizations here. Firstly, we - * may pass the current "best" cost to cost_sort_estimate so that it can - * "abort" if the estimated pathkeys list exceeds it. Secondly, it could pass - * the return information about the position when it exceeded the cost, and - * we could skip all permutations with the same prefix. + * there's room for two dynamic programming optimizations here. Firstly, + * we may pass the current "best" cost to cost_sort_estimate so that it + * can "abort" if the estimated pathkeys list exceeds it. Secondly, it + * could return information about the position where it exceeded + * the cost, and we could skip all permutations with the same prefix.
* * Imagine we've already found ordering with cost C1, and we're evaluating * another ordering - cost_sort_estimate() calculates cost by adding the * pathkeys one by one (more or less), and the cost only grows. If at any - * point it exceeds C1, it can't possibly be "better" so we can discard it. - * But we also know that we can discard all ordering with the same prefix, - * because if we're estimating (a,b,c,d) and we exceed C1 at (a,b) then the - * same thing will happen for any ordering with this prefix. + * point it exceeds C1, it can't possibly be "better" so we can discard + * it. But we also know that we can discard all ordering with the same + * prefix, because if we're estimating (a,b,c,d) and we exceed C1 at (a,b) + * then the same thing will happen for any ordering with this prefix. */ PathkeyMutatorInit(&mstate, new_group_pathkeys, n_preordered, n_preordered + nToPermute); - while((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL) + while ((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL) { - Cost cost; + Cost cost; cost = cost_sort_estimate(root, var_group_pathkeys, n_preordered, nrows); @@ -694,11 +700,11 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows, /* Reorder the group clauses according to the reordered pathkeys. */ foreach(cell, new_group_pathkeys) { - PathKey *pathkey = (PathKey *) lfirst(cell); + PathKey *pathkey = (PathKey *) lfirst(cell); new_group_clauses = lappend(new_group_clauses, - get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref, - *group_clauses)); + get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref, + *group_clauses)); } /* Just append the rest GROUP BY clauses */ @@ -745,8 +751,8 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows, PathKeyInfo *info; int n_preordered = 0; - List *pathkeys = group_pathkeys; - List *clauses = group_clauses; + List *pathkeys = group_pathkeys; + List *clauses = group_clauses; /* always return at least the original pathkeys/clauses */ info = makeNode(PathKeyInfo); @@ -756,9 +762,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows, infos = lappend(infos, info); /* - * Should we try generating alternative orderings of the group keys? If not, - * we produce only the order specified in the query, i.e. the optimization - * is effectively disabled. + * Should we try generating alternative orderings of the group keys? If + * not, we produce only the order specified in the query, i.e. the + * optimization is effectively disabled. */ if (!enable_group_by_reordering) return infos; @@ -782,8 +788,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows, } /* - * If the path is sorted in some way, try reordering the group keys to match - * as much of the ordering as possible - we get this sort for free (mostly). + * If the path is sorted in some way, try reordering the group keys to + * match as much of the ordering as possible - we get this sort for free + * (mostly). * * We must not do this when there are no grouping sets, because those use * more complex logic to decide the ordering. 
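The PathkeyMutator machinery reindented above advances through orderings with the classic lexicographic next-permutation step (PathkeyMutatorNextSet), and get_cheapest_group_keys_order applies it to at most nToPermute = 4 trailing keys, so no more than 4! = 24 orderings are ever costed. A self-contained sketch of the same algorithm over a plain int array follows; the demo main() is illustrative only, not PostgreSQL code.

#include <stdio.h>
#include <stdbool.h>

static void
swap_ints(int *a, int i, int j)
{
	int			s = a[i];

	a[i] = a[j];
	a[j] = s;
}

/* advance a[0..n-1] to the next permutation; false once exhausted */
static bool
next_permutation(int *a, int n)
{
	int			j,
				l,
				k,
				r;

	/* find the rightmost ascent a[j] < a[j+1]; none means we're done */
	j = n - 2;
	while (j >= 0 && a[j] >= a[j + 1])
		j--;
	if (j < 0)
		return false;

	/* swap a[j] with the rightmost element larger than it */
	l = n - 1;
	while (a[l] <= a[j])
		l--;
	swap_ints(a, j, l);

	/* reverse the suffix, making it the smallest possible tail */
	for (k = j + 1, r = n - 1; k < r; k++, r--)
		swap_ints(a, k, r);

	return true;
}

int
main(void)
{
	int			a[] = {1, 2, 3};

	do
	{
		printf("%d %d %d\n", a[0], a[1], a[2]);
	} while (next_permutation(a, 3));

	return 0;
}

Because each call produces the immediate lexicographic successor, the caller can walk every ordering without materializing the whole set, which is why costing the candidate orderings stays cheap.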
@@ -2400,8 +2407,8 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys) static int pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys) { - ListCell *key; - int n = 0; + ListCell *key; + int n = 0; /* no special ordering requested for grouping */ if (root->group_pathkeys == NIL) @@ -2414,7 +2421,7 @@ pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys) /* walk the pathkeys and search for matching group key */ foreach(key, pathkeys) { - PathKey *pathkey = (PathKey *) lfirst(key); + PathKey *pathkey = (PathKey *) lfirst(key); /* no matching group key, we're done */ if (!list_member_ptr(root->group_pathkeys, pathkey)) diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index db11936efef..f4cc56039c2 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -1162,8 +1162,8 @@ mark_async_capable_plan(Plan *plan, Path *path) case T_ProjectionPath: /* - * If the generated plan node includes a Result node for - * the projection, we can't execute it asynchronously. + * If the generated plan node includes a Result node for the + * projection, we can't execute it asynchronously. */ if (IsA(plan, Result)) return false; diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 9a4accb4d9d..a0f2390334e 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -6250,7 +6250,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, Assert(list_length(pathkey_orderings) > 0); /* process all potentially interesting grouping reorderings */ - foreach (lc2, pathkey_orderings) + foreach(lc2, pathkey_orderings) { bool is_sorted; int presorted_keys = 0; @@ -6283,8 +6283,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, else if (parse->hasAggs) { /* - * We have aggregation, possibly with plain GROUP BY. Make - * an AggPath. + * We have aggregation, possibly with plain GROUP BY. + * Make an AggPath. */ add_path(grouped_rel, (Path *) create_agg_path(root, @@ -6301,8 +6301,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, else if (group_clauses) { /* - * We have GROUP BY without aggregation or grouping sets. - * Make a GroupPath. + * We have GROUP BY without aggregation or grouping + * sets. Make a GroupPath. */ add_path(grouped_rel, (Path *) create_group_path(root, @@ -6321,8 +6321,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, /* * Now we may consider incremental sort on this path, but only - * when the path is not already sorted and when incremental sort - * is enabled. + * when the path is not already sorted and when incremental + * sort is enabled. */ if (is_sorted || !enable_incremental_sort) continue; @@ -6335,8 +6335,9 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, continue; /* - * We should have already excluded pathkeys of length 1 because - * then presorted_keys > 0 would imply is_sorted was true. + * We should have already excluded pathkeys of length 1 + * because then presorted_keys > 0 would imply is_sorted was + * true. */ Assert(list_length(root->group_pathkeys) != 1); @@ -6357,8 +6358,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, else if (parse->hasAggs) { /* - * We have aggregation, possibly with plain GROUP BY. Make an - * AggPath. + * We have aggregation, possibly with plain GROUP BY. Make + * an AggPath. 
*/ add_path(grouped_rel, (Path *) create_agg_path(root, @@ -6375,8 +6376,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, else if (parse->groupClause) { /* - * We have GROUP BY without aggregation or grouping sets. Make - * a GroupPath. + * We have GROUP BY without aggregation or grouping sets. + * Make a GroupPath. */ add_path(grouped_rel, (Path *) create_group_path(root, @@ -6421,7 +6422,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, Assert(list_length(pathkey_orderings) > 0); /* process all potentially interesting grouping reorderings */ - foreach (lc2, pathkey_orderings) + foreach(lc2, pathkey_orderings) { bool is_sorted; int presorted_keys = 0; @@ -6435,8 +6436,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, &presorted_keys); /* - * Insert a Sort node, if required. But there's no point in - * sorting anything but the cheapest path. + * Insert a Sort node, if required. But there's no point + * in sorting anything but the cheapest path. */ if (!is_sorted) { @@ -6471,24 +6472,30 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel, dNumGroups)); /* - * Now we may consider incremental sort on this path, but only - * when the path is not already sorted and when incremental - * sort is enabled. + * Now we may consider incremental sort on this path, but + * only when the path is not already sorted and when + * incremental sort is enabled. */ if (is_sorted || !enable_incremental_sort) continue; - /* Restore the input path (we might have added Sort on top). */ + /* + * Restore the input path (we might have added Sort on + * top). + */ path = path_original; - /* no shared prefix, not point in building incremental sort */ + /* + * no shared prefix, no point in building incremental + * sort + */ if (presorted_keys == 0) continue; /* * We should have already excluded pathkeys of length 1 - * because then presorted_keys > 0 would imply is_sorted was - * true. + * because then presorted_keys > 0 would imply is_sorted + * was true. */ Assert(list_length(root->group_pathkeys) != 1); @@ -6741,7 +6748,7 @@ create_partial_grouping_paths(PlannerInfo *root, Assert(list_length(pathkey_orderings) > 0); /* process all potentially interesting grouping reorderings */ - foreach (lc2, pathkey_orderings) + foreach(lc2, pathkey_orderings) { bool is_sorted; int presorted_keys = 0; @@ -6874,7 +6881,7 @@ create_partial_grouping_paths(PlannerInfo *root, Assert(list_length(pathkey_orderings) > 0); /* process all potentially interesting grouping reorderings */ - foreach (lc2, pathkey_orderings) + foreach(lc2, pathkey_orderings) { bool is_sorted; int presorted_keys = 0; @@ -6924,8 +6931,8 @@ create_partial_grouping_paths(PlannerInfo *root, /* * Now we may consider incremental sort on this path, but only - * when the path is not already sorted and when incremental sort - * is enabled. + * when the path is not already sorted and when incremental + * sort is enabled. */ if (is_sorted || !enable_incremental_sort) continue; @@ -6938,8 +6945,9 @@ create_partial_grouping_paths(PlannerInfo *root, continue; /* - * We should have already excluded pathkeys of length 1 because - * then presorted_keys > 0 would imply is_sorted was true. + * We should have already excluded pathkeys of length 1 + * because then presorted_keys > 0 would imply is_sorted was + * true.
*/ Assert(list_length(root->group_pathkeys) != 1); diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index e381ae512a2..533df86ff77 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -391,7 +391,7 @@ contain_mutable_functions_walker(Node *node, void *context) const JsonConstructorExpr *ctor = (JsonConstructorExpr *) node; ListCell *lc; bool is_jsonb = - ctor->returning->format->format_type == JS_FORMAT_JSONB; + ctor->returning->format->format_type == JS_FORMAT_JSONB; /* Check argument_type => json[b] conversions */ foreach(lc, ctor->args) @@ -899,7 +899,7 @@ max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context) /* JsonExpr is parallel-unsafe if subtransactions can be used. */ else if (IsA(node, JsonExpr)) { - JsonExpr *jsexpr = (JsonExpr *) node; + JsonExpr *jsexpr = (JsonExpr *) node; if (ExecEvalJsonNeedsSubTransaction(jsexpr, NULL)) { @@ -3581,7 +3581,7 @@ eval_const_expressions_mutator(Node *node, context->case_val = raw; formatted = eval_const_expressions_mutator((Node *) jve->formatted_expr, - context); + context); context->case_val = save_case_val; @@ -5315,7 +5315,7 @@ pull_paramids_walker(Node *node, Bitmapset **context) return false; if (IsA(node, Param)) { - Param *param = (Param *)node; + Param *param = (Param *) node; *context = bms_add_member(*context, param->paramid); return false; diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index df97b799174..5012bfe1425 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -968,102 +968,102 @@ estimate_rel_size(Relation rel, int32 *attr_widths, if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind)) { - table_relation_estimate_size(rel, attr_widths, pages, tuples, - allvisfrac); + table_relation_estimate_size(rel, attr_widths, pages, tuples, + allvisfrac); } else if (rel->rd_rel->relkind == RELKIND_INDEX) { - /* - * XXX: It'd probably be good to move this into a callback, - * individual index types e.g. know if they have a metapage. - */ + /* + * XXX: It'd probably be good to move this into a callback, individual + * index types e.g. know if they have a metapage. + */ - /* it has storage, ok to call the smgr */ - curpages = RelationGetNumberOfBlocks(rel); + /* it has storage, ok to call the smgr */ + curpages = RelationGetNumberOfBlocks(rel); - /* report estimated # pages */ - *pages = curpages; - /* quick exit if rel is clearly empty */ - if (curpages == 0) - { - *tuples = 0; - *allvisfrac = 0; - return; - } + /* report estimated # pages */ + *pages = curpages; + /* quick exit if rel is clearly empty */ + if (curpages == 0) + { + *tuples = 0; + *allvisfrac = 0; + return; + } - /* coerce values in pg_class to more desirable types */ - relpages = (BlockNumber) rel->rd_rel->relpages; - reltuples = (double) rel->rd_rel->reltuples; - relallvisible = (BlockNumber) rel->rd_rel->relallvisible; + /* coerce values in pg_class to more desirable types */ + relpages = (BlockNumber) rel->rd_rel->relpages; + reltuples = (double) rel->rd_rel->reltuples; + relallvisible = (BlockNumber) rel->rd_rel->relallvisible; + /* + * Discount the metapage while estimating the number of tuples. This + * is a kluge because it assumes more than it ought to about index + * structure. Currently it's OK for btree, hash, and GIN indexes but + * suspect for GiST indexes. 
+ */ + if (relpages > 0) + { + curpages--; + relpages--; + } + + /* estimate number of tuples from previous tuple density */ + if (reltuples >= 0 && relpages > 0) + density = reltuples / (double) relpages; + else + { /* - * Discount the metapage while estimating the number of tuples. - * This is a kluge because it assumes more than it ought to about - * index structure. Currently it's OK for btree, hash, and GIN - * indexes but suspect for GiST indexes. + * If we have no data because the relation was never vacuumed, + * estimate tuple width from attribute datatypes. We assume here + * that the pages are completely full, which is OK for tables + * (since they've presumably not been VACUUMed yet) but is + * probably an overestimate for indexes. Fortunately + * get_relation_info() can clamp the overestimate to the parent + * table's size. + * + * Note: this code intentionally disregards alignment + * considerations, because (a) that would be gilding the lily + * considering how crude the estimate is, and (b) it creates + * platform dependencies in the default plans which are kind of a + * headache for regression testing. + * + * XXX: Should this logic be more index specific? */ - if (relpages > 0) - { - curpages--; - relpages--; - } - - /* estimate number of tuples from previous tuple density */ - if (reltuples >= 0 && relpages > 0) - density = reltuples / (double) relpages; - else - { - /* - * If we have no data because the relation was never vacuumed, - * estimate tuple width from attribute datatypes. We assume - * here that the pages are completely full, which is OK for - * tables (since they've presumably not been VACUUMed yet) but - * is probably an overestimate for indexes. Fortunately - * get_relation_info() can clamp the overestimate to the - * parent table's size. - * - * Note: this code intentionally disregards alignment - * considerations, because (a) that would be gilding the lily - * considering how crude the estimate is, and (b) it creates - * platform dependencies in the default plans which are kind - * of a headache for regression testing. - * - * XXX: Should this logic be more index specific? - */ - int32 tuple_width; + int32 tuple_width; - tuple_width = get_rel_data_width(rel, attr_widths); - tuple_width += MAXALIGN(SizeofHeapTupleHeader); - tuple_width += sizeof(ItemIdData); - /* note: integer division is intentional here */ - density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width; - } - *tuples = rint(density * (double) curpages); + tuple_width = get_rel_data_width(rel, attr_widths); + tuple_width += MAXALIGN(SizeofHeapTupleHeader); + tuple_width += sizeof(ItemIdData); + /* note: integer division is intentional here */ + density = (BLCKSZ - SizeOfPageHeaderData) / tuple_width; + } + *tuples = rint(density * (double) curpages); - /* - * We use relallvisible as-is, rather than scaling it up like we - * do for the pages and tuples counts, on the theory that any - * pages added since the last VACUUM are most likely not marked - * all-visible. But costsize.c wants it converted to a fraction. - */ - if (relallvisible == 0 || curpages <= 0) - *allvisfrac = 0; - else if ((double) relallvisible >= curpages) - *allvisfrac = 1; - else - *allvisfrac = (double) relallvisible / curpages; + /* + * We use relallvisible as-is, rather than scaling it up like we do + * for the pages and tuples counts, on the theory that any pages added + * since the last VACUUM are most likely not marked all-visible. But + * costsize.c wants it converted to a fraction. 
+ */ + if (relallvisible == 0 || curpages <= 0) + *allvisfrac = 0; + else if ((double) relallvisible >= curpages) + *allvisfrac = 1; + else + *allvisfrac = (double) relallvisible / curpages; } else { - /* - * Just use whatever's in pg_class. This covers foreign tables, - * sequences, and also relkinds without storage (shouldn't get - * here?); see initializations in AddNewRelationTuple(). Note - * that FDW must cope if reltuples is -1! - */ - *pages = rel->rd_rel->relpages; - *tuples = rel->rd_rel->reltuples; - *allvisfrac = 0; + /* + * Just use whatever's in pg_class. This covers foreign tables, + * sequences, and also relkinds without storage (shouldn't get here?); + * see initializations in AddNewRelationTuple(). Note that FDW must + * cope if reltuples is -1! + */ + *pages = rel->rd_rel->relpages; + *tuples = rel->rd_rel->reltuples; + *allvisfrac = 0; } } diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 6b54e8e46df..1bcb875507d 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -104,8 +104,8 @@ static bool test_raw_expression_coverage(Node *node, void *context); */ Query * parse_analyze_fixedparams(RawStmt *parseTree, const char *sourceText, - const Oid *paramTypes, int numParams, - QueryEnvironment *queryEnv) + const Oid *paramTypes, int numParams, + QueryEnvironment *queryEnv) { ParseState *pstate = make_parsestate(NULL); Query *query; @@ -2076,8 +2076,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt, ListCell *ltl; ListCell *rtl; const char *context; - bool recursive = (pstate->p_parent_cte && - pstate->p_parent_cte->cterecursive); + bool recursive = (pstate->p_parent_cte && + pstate->p_parent_cte->cterecursive); context = (stmt->op == SETOP_UNION ? "UNION" : (stmt->op == SETOP_INTERSECT ? "INTERSECT" : @@ -2231,7 +2231,10 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt, setup_parser_errposition_callback(&pcbstate, pstate, bestlocation); - /* If it's a recursive union, we need to require hashing support. */ + /* + * If it's a recursive union, we need to require hashing + * support. + */ op->groupClauses = lappend(op->groupClauses, makeSortGroupClauseForSetOp(rescoltype, recursive)); diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index dafde68b207..e2baa9d852e 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -2004,7 +2004,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist, } if (IsA(node, A_Const)) { - A_Const *aconst = castNode(A_Const, node); + A_Const *aconst = castNode(A_Const, node); int targetlist_pos = 0; int target_pos; diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c index 45dacc6c4c5..e90af4c4771 100644 --- a/src/backend/parser/parse_collate.c +++ b/src/backend/parser/parse_collate.c @@ -692,8 +692,11 @@ assign_collations_walker(Node *node, assign_collations_context *context) } break; case T_JsonExpr: - /* Context item and PASSING arguments are already - * marked with collations in parse_expr.c. */ + + /* + * Context item and PASSING arguments are already + * marked with collations in parse_expr.c. 
+ */ break; default: diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index c1f194cc5b0..17709c3416b 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -3277,7 +3277,7 @@ transformJsonValueExprExt(ParseState *pstate, JsonValueExpr *ve, if (exprtype == JSONOID || exprtype == JSONBOID) { - format = JS_FORMAT_DEFAULT; /* do not format json[b] types */ + format = JS_FORMAT_DEFAULT; /* do not format json[b] types */ ereport(WARNING, (errmsg("FORMAT JSON has no effect for json and jsonb types"), parser_errposition(pstate, ve->format->location))); @@ -3316,7 +3316,7 @@ transformJsonValueExprExt(ParseState *pstate, JsonValueExpr *ve, format = default_format; } else if (exprtype == JSONOID || exprtype == JSONBOID) - format = JS_FORMAT_DEFAULT; /* do not format json[b] types */ + format = JS_FORMAT_DEFAULT; /* do not format json[b] types */ else format = default_format; @@ -3364,13 +3364,13 @@ transformJsonValueExprExt(ParseState *pstate, JsonValueExpr *ve, FuncExpr *fexpr; Oid fnoid; - if (cast_is_needed) /* only CAST is allowed */ + if (cast_is_needed) /* only CAST is allowed */ ereport(ERROR, (errcode(ERRCODE_CANNOT_COERCE), errmsg("cannot cast type %s to %s", format_type_be(exprtype), format_type_be(targettype)), - parser_errposition(pstate, location))); + parser_errposition(pstate, location))); fnoid = targettype == JSONOID ? F_TO_JSON : F_TO_JSONB; fexpr = makeFuncExpr(fnoid, targettype, list_make1(expr), @@ -3444,7 +3444,7 @@ checkJsonOutputFormat(ParseState *pstate, const JsonFormat *format, if (format->format_type == JS_FORMAT_JSON) { JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ? - format->encoding : JS_ENC_UTF8; + format->encoding : JS_ENC_UTF8; if (targettype != BYTEAOID && format->encoding != JS_ENC_DEFAULT) @@ -3583,6 +3583,7 @@ coerceJsonFuncExpr(ParseState *pstate, Node *expr, list_make2(texpr, enc), InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL); + fexpr->location = location; return (Node *) fexpr; @@ -3591,7 +3592,7 @@ coerceJsonFuncExpr(ParseState *pstate, Node *expr, /* try to coerce expression to the output type */ res = coerce_to_target_type(pstate, expr, exprtype, returning->typid, returning->typmod, - /* XXX throwing errors when casting to char(N) */ + /* XXX throwing errors when casting to char(N) */ COERCION_EXPLICIT, COERCE_EXPLICIT_CAST, location); @@ -3616,7 +3617,7 @@ makeJsonConstructorExpr(ParseState *pstate, JsonConstructorType type, Node *placeholder; Node *coercion; Oid intermediate_typid = - returning->format->format_type == JS_FORMAT_JSONB ? JSONBOID : JSONOID; + returning->format->format_type == JS_FORMAT_JSONB ? JSONBOID : JSONOID; jsctor->args = args; jsctor->func = fexpr; @@ -3694,7 +3695,7 @@ static Node * transformJsonArrayQueryConstructor(ParseState *pstate, JsonArrayQueryConstructor *ctor) { - SubLink *sublink = makeNode(SubLink); + SubLink *sublink = makeNode(SubLink); SelectStmt *select = makeNode(SelectStmt); RangeSubselect *range = makeNode(RangeSubselect); Alias *alias = makeNode(Alias); @@ -3766,8 +3767,8 @@ transformJsonAggConstructor(ParseState *pstate, JsonAggConstructor *agg_ctor, Oid aggfnoid; Node *node; Expr *aggfilter = agg_ctor->agg_filter ? 
(Expr *) - transformWhereClause(pstate, agg_ctor->agg_filter, - EXPR_KIND_FILTER, "FILTER") : NULL; + transformWhereClause(pstate, agg_ctor->agg_filter, + EXPR_KIND_FILTER, "FILTER") : NULL; aggfnoid = DatumGetInt32(DirectFunctionCall1(regprocin, CStringGetDatum(aggfn))); @@ -3809,7 +3810,7 @@ transformJsonAggConstructor(ParseState *pstate, JsonAggConstructor *agg_ctor, aggref->aggtype = aggtype; /* aggcollid and inputcollid will be set by parse_collate.c */ - aggref->aggtranstype = InvalidOid; /* will be set by planner */ + aggref->aggtranstype = InvalidOid; /* will be set by planner */ /* aggargtypes will be set by transformAggregateCall */ /* aggdirectargs and args will be set by transformAggregateCall */ /* aggorder and aggdistinct will be set by transformAggregateCall */ @@ -3818,7 +3819,7 @@ transformJsonAggConstructor(ParseState *pstate, JsonAggConstructor *agg_ctor, aggref->aggvariadic = false; aggref->aggkind = AGGKIND_NORMAL; /* agglevelsup will be set by transformAggregateCall */ - aggref->aggsplit = AGGSPLIT_SIMPLE; /* planner might change this */ + aggref->aggsplit = AGGSPLIT_SIMPLE; /* planner might change this */ aggref->location = agg_ctor->location; transformAggregateCall(pstate, aggref, args, agg_ctor->agg_order, false); @@ -3860,14 +3861,13 @@ transformJsonObjectAgg(ParseState *pstate, JsonObjectAgg *agg) { if (agg->absent_on_null) if (agg->unique) - aggfnname = "pg_catalog.jsonb_object_agg_unique_strict"; /* F_JSONB_OBJECT_AGG_UNIQUE_STRICT */ + aggfnname = "pg_catalog.jsonb_object_agg_unique_strict"; /* F_JSONB_OBJECT_AGG_UNIQUE_STRICT */ else - aggfnname = "pg_catalog.jsonb_object_agg_strict"; /* F_JSONB_OBJECT_AGG_STRICT */ + aggfnname = "pg_catalog.jsonb_object_agg_strict"; /* F_JSONB_OBJECT_AGG_STRICT */ + else if (agg->unique) + aggfnname = "pg_catalog.jsonb_object_agg_unique"; /* F_JSONB_OBJECT_AGG_UNIQUE */ else - if (agg->unique) - aggfnname = "pg_catalog.jsonb_object_agg_unique"; /* F_JSONB_OBJECT_AGG_UNIQUE */ - else - aggfnname = "pg_catalog.jsonb_object_agg"; /* F_JSONB_OBJECT_AGG */ + aggfnname = "pg_catalog.jsonb_object_agg"; /* F_JSONB_OBJECT_AGG */ aggtype = JSONBOID; } @@ -3877,12 +3877,11 @@ transformJsonObjectAgg(ParseState *pstate, JsonObjectAgg *agg) if (agg->unique) aggfnname = "pg_catalog.json_object_agg_unique_strict"; /* F_JSON_OBJECT_AGG_UNIQUE_STRICT */ else - aggfnname = "pg_catalog.json_object_agg_strict"; /* F_JSON_OBJECT_AGG_STRICT */ + aggfnname = "pg_catalog.json_object_agg_strict"; /* F_JSON_OBJECT_AGG_STRICT */ + else if (agg->unique) + aggfnname = "pg_catalog.json_object_agg_unique"; /* F_JSON_OBJECT_AGG_UNIQUE */ else - if (agg->unique) - aggfnname = "pg_catalog.json_object_agg_unique"; /* F_JSON_OBJECT_AGG_UNIQUE */ - else - aggfnname = "pg_catalog.json_object_agg"; /* F_JSON_OBJECT_AGG */ + aggfnname = "pg_catalog.json_object_agg"; /* F_JSON_OBJECT_AGG */ aggtype = JSONOID; } @@ -4209,7 +4208,7 @@ coerceJsonExpr(ParseState *pstate, Node *expr, const JsonReturning *returning) * Transform a JSON output clause of JSON_VALUE and JSON_QUERY. 
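The transformJsonObjectAgg hunk above picks one of four catalog aggregates from two flags: absent_on_null selects the *_strict variants and unique selects the *_unique variants. A table-driven sketch of the same mapping for the jsonb case; the function name is invented, the string values come straight from the hunk:

#include <stdbool.h>
#include <stdio.h>

static const char *
object_agg_fnname(bool absent_on_null, bool unique)
{
    /* rows: absent_on_null (strict); columns: unique */
    static const char *const names[2][2] = {
        {"pg_catalog.jsonb_object_agg",
         "pg_catalog.jsonb_object_agg_unique"},
        {"pg_catalog.jsonb_object_agg_strict",
         "pg_catalog.jsonb_object_agg_unique_strict"},
    };

    return names[absent_on_null][unique];
}

int
main(void)
{
    printf("%s\n", object_agg_fnname(true, false));     /* ..._strict */
    return 0;
}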
*/ static void -transformJsonFuncExprOutput(ParseState *pstate, JsonFuncExpr *func, +transformJsonFuncExprOutput(ParseState *pstate, JsonFuncExpr *func, JsonExpr *jsexpr) { Node *expr = jsexpr->formatted_expr; @@ -4333,19 +4332,19 @@ initJsonItemCoercions(ParseState *pstate, JsonItemCoercions *coercions, Oid typid; } *p, coercionTypids[] = - { - { &coercions->null, UNKNOWNOID }, - { &coercions->string, TEXTOID }, - { &coercions->numeric, NUMERICOID }, - { &coercions->boolean, BOOLOID }, - { &coercions->date, DATEOID }, - { &coercions->time, TIMEOID }, - { &coercions->timetz, TIMETZOID }, - { &coercions->timestamp, TIMESTAMPOID }, - { &coercions->timestamptz, TIMESTAMPTZOID }, - { &coercions->composite, contextItemTypeId }, - { NULL, InvalidOid } - }; + { + {&coercions->null, UNKNOWNOID}, + {&coercions->string, TEXTOID}, + {&coercions->numeric, NUMERICOID}, + {&coercions->boolean, BOOLOID}, + {&coercions->date, DATEOID}, + {&coercions->time, TIMEOID}, + {&coercions->timetz, TIMETZOID}, + {&coercions->timestamp, TIMESTAMPOID}, + {&coercions->timestamptz, TIMESTAMPTZOID}, + {&coercions->composite, contextItemTypeId}, + {NULL, InvalidOid} + }; for (p = coercionTypids; p->coercion; p++) *p->coercion = initJsonItemCoercion(pstate, p->typid, returning); @@ -4512,7 +4511,7 @@ static Node * transformJsonParseExpr(ParseState *pstate, JsonParseExpr *jsexpr) { JsonReturning *returning = transformJsonConstructorRet(pstate, jsexpr->output, - "JSON()"); + "JSON()"); Node *arg; if (jsexpr->unique_keys) @@ -4544,8 +4543,8 @@ transformJsonParseExpr(ParseState *pstate, JsonParseExpr *jsexpr) } return makeJsonConstructorExpr(pstate, JSCTOR_JSON_PARSE, list_make1(arg), NULL, - returning, jsexpr->unique_keys, false, - jsexpr->location); + returning, jsexpr->unique_keys, false, + jsexpr->location); } /* @@ -4556,13 +4555,13 @@ transformJsonScalarExpr(ParseState *pstate, JsonScalarExpr *jsexpr) { Node *arg = transformExprRecurse(pstate, (Node *) jsexpr->expr); JsonReturning *returning = transformJsonConstructorRet(pstate, jsexpr->output, - "JSON_SCALAR()"); + "JSON_SCALAR()"); if (exprType(arg) == UNKNOWNOID) arg = coerce_to_specific_type(pstate, arg, TEXTOID, "JSON_SCALAR"); return makeJsonConstructorExpr(pstate, JSCTOR_JSON_SCALAR, list_make1(arg), NULL, - returning, false, false, jsexpr->location); + returning, false, false, jsexpr->location); } /* @@ -4586,5 +4585,5 @@ transformJsonSerializeExpr(ParseState *pstate, JsonSerializeExpr *expr) } return makeJsonConstructorExpr(pstate, JSCTOR_JSON_SERIALIZE, list_make1(arg), - NULL, returning, false, false, expr->location); + NULL, returning, false, false, expr->location); } diff --git a/src/backend/parser/parse_jsontable.c b/src/backend/parser/parse_jsontable.c index 5ee63cf57f0..dbd3e66205d 100644 --- a/src/backend/parser/parse_jsontable.c +++ b/src/backend/parser/parse_jsontable.c @@ -33,31 +33,31 @@ /* Context for JSON_TABLE transformation */ typedef struct JsonTableContext { - ParseState *pstate; /* parsing state */ - JsonTable *table; /* untransformed node */ - TableFunc *tablefunc; /* transformed node */ - List *pathNames; /* list of all path and columns names */ - int pathNameId; /* path name id counter */ + ParseState *pstate; /* parsing state */ + JsonTable *table; /* untransformed node */ + TableFunc *tablefunc; /* transformed node */ + List *pathNames; /* list of all path and columns names */ + int pathNameId; /* path name id counter */ Oid contextItemTypid; /* type oid of context item (json/jsonb) */ } JsonTableContext; -static JsonTableParent * 
transformJsonTableColumns(JsonTableContext *cxt, - JsonTablePlan *plan, - List *columns, - char *pathSpec, - char **pathName, - int location); +static JsonTableParent *transformJsonTableColumns(JsonTableContext *cxt, + JsonTablePlan *plan, + List *columns, + char *pathSpec, + char **pathName, + int location); static Node * makeStringConst(char *str, int location) { - A_Const *n = makeNode(A_Const); + A_Const *n = makeNode(A_Const); n->val.node.type = T_String; n->val.sval.sval = str; n->location = location; - return (Node *)n; + return (Node *) n; } /* @@ -122,7 +122,7 @@ transformJsonTableColumn(JsonTableColumn *jtc, Node *contextItemExpr, static bool isJsonTablePathNameDuplicate(JsonTableContext *cxt, const char *pathname) { - ListCell *lc; + ListCell *lc; foreach(lc, cxt->pathNames) { @@ -342,7 +342,7 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan, foreach(lc, columns) { JsonTableColumn *jtc = castNode(JsonTableColumn, lfirst(lc)); - Node *node; + Node *node; if (jtc->coltype != JTC_NESTED) continue; @@ -369,10 +369,10 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan, } else { - Node *node1 = - transformJsonTableChildPlan(cxt, plan->plan1, columns); - Node *node2 = - transformJsonTableChildPlan(cxt, plan->plan2, columns); + Node *node1 = transformJsonTableChildPlan(cxt, plan->plan1, + columns); + Node *node2 = transformJsonTableChildPlan(cxt, plan->plan2, + columns); return makeJsonTableSiblingJoin(plan->join_type == JSTPJ_CROSS, node1, node2); @@ -396,7 +396,7 @@ transformJsonTableChildPlan(JsonTableContext *cxt, JsonTablePlan *plan, static bool typeIsComposite(Oid typid) { - char typtype; + char typtype; if (typid == JSONOID || typid == JSONBOID || @@ -406,7 +406,7 @@ typeIsComposite(Oid typid) typtype = get_typtype(typid); - if (typtype == TYPTYPE_COMPOSITE) + if (typtype == TYPTYPE_COMPOSITE) return true; if (typtype == TYPTYPE_DOMAIN) @@ -424,7 +424,7 @@ appendJsonTableColumns(JsonTableContext *cxt, List *columns) JsonTable *jt = cxt->table; TableFunc *tf = cxt->tablefunc; bool errorOnError = jt->on_error && - jt->on_error->btype == JSON_BEHAVIOR_ERROR; + jt->on_error->btype == JSON_BEHAVIOR_ERROR; foreach(col, columns) { @@ -436,24 +436,23 @@ appendJsonTableColumns(JsonTableContext *cxt, List *columns) if (rawc->name) { /* make sure column names are unique */ - ListCell *colname; + ListCell *colname; foreach(colname, tf->colnames) if (!strcmp((const char *) colname, rawc->name)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("column name \"%s\" is not unique", - rawc->name), - parser_errposition(pstate, rawc->location))); + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("column name \"%s\" is not unique", + rawc->name), + parser_errposition(pstate, rawc->location))); tf->colnames = lappend(tf->colnames, makeString(pstrdup(rawc->name))); } /* - * Determine the type and typmod for the new column. FOR - * ORDINALITY columns are INTEGER by standard; the others are - * user-specified. + * Determine the type and typmod for the new column. FOR ORDINALITY + * columns are INTEGER by standard; the others are user-specified. */ switch (rawc->coltype) { @@ -517,8 +516,8 @@ appendJsonTableColumns(JsonTableContext *cxt, List *columns) tf->coltypmods = lappend_int(tf->coltypmods, typmod); tf->colcollations = lappend_oid(tf->colcollations, type_is_collatable(typid) - ? DEFAULT_COLLATION_OID - : InvalidOid); + ? 
DEFAULT_COLLATION_OID + : InvalidOid); tf->colvalexprs = lappend(tf->colvalexprs, colexpr); } } @@ -571,7 +570,7 @@ transformJsonTableColumns(JsonTableContext *cxt, JsonTablePlan *plan, errdetail("JSON_TABLE columns must contain " "explicit AS pathname specification if " "explicit PLAN clause is used"), - parser_errposition(cxt->pstate, location))); + parser_errposition(cxt->pstate, location))); *pathName = generateJsonTablePathName(cxt); } @@ -662,14 +661,15 @@ transformJsonTable(ParseState *pstate, JsonTable *jt) registerAllJsonTableColumns(&cxt, jt->columns); -#if 0 /* XXX it' unclear from the standard whether root path name is mandatory or not */ +#if 0 /* XXX it' unclear from the standard whether + * root path name is mandatory or not */ if (plan && plan->plan_type != JSTP_DEFAULT && !rootPathName) { /* Assign root path name and create corresponding plan node */ JsonTablePlan *rootNode = makeNode(JsonTablePlan); JsonTablePlan *rootPlan = (JsonTablePlan *) - makeJsonTableJoinedPlan(JSTPJ_OUTER, (Node *) rootNode, - (Node *) plan, jt->location); + makeJsonTableJoinedPlan(JSTPJ_OUTER, (Node *) rootNode, + (Node *) plan, jt->location); rootPathName = generateJsonTablePathName(&cxt); diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c index a49c985d36e..4d39cf95945 100644 --- a/src/backend/parser/parse_node.c +++ b/src/backend/parser/parse_node.c @@ -382,55 +382,56 @@ make_const(ParseState *pstate, A_Const *aconst) break; case T_Float: - { - /* could be an oversize integer as well as a float ... */ - - int64 val64; - char *endptr; - - errno = 0; - val64 = strtoi64(aconst->val.fval.fval, &endptr, 10); - if (errno == 0 && *endptr == '\0') { - /* - * It might actually fit in int32. Probably only INT_MIN can - * occur, but we'll code the test generally just to be sure. - */ - int32 val32 = (int32) val64; + /* could be an oversize integer as well as a float ... */ - if (val64 == (int64) val32) - { - val = Int32GetDatum(val32); + int64 val64; + char *endptr; - typeid = INT4OID; - typelen = sizeof(int32); - typebyval = true; + errno = 0; + val64 = strtoi64(aconst->val.fval.fval, &endptr, 10); + if (errno == 0 && *endptr == '\0') + { + /* + * It might actually fit in int32. Probably only INT_MIN + * can occur, but we'll code the test generally just to be + * sure. 
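The make_const hunk above shows the literal-narrowing strategy for numeric constants: parse as 64-bit first, demote to int4 when the value round-trips through int32, keep int8 otherwise, and fall back to numeric when even int8 overflows. A standalone sketch, with strtoll standing in for the backend's strtoi64 and printf standing in for the Datum plumbing:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    const char *literal = "2147483648";     /* INT32_MAX + 1 */
    char       *endptr;
    long long   val64;

    errno = 0;
    val64 = strtoll(literal, &endptr, 10);
    if (errno == 0 && *endptr == '\0')
    {
        int32_t     val32 = (int32_t) val64;

        if (val64 == (long long) val32)
            printf("fits in int4: %d\n", (int) val32);
        else
            printf("fits in int8: %lld\n", val64);      /* taken here */
    }
    else
        printf("too wide even for int8; fall back to numeric\n");
    return 0;
}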
+ */ + int32 val32 = (int32) val64; + + if (val64 == (int64) val32) + { + val = Int32GetDatum(val32); + + typeid = INT4OID; + typelen = sizeof(int32); + typebyval = true; + } + else + { + val = Int64GetDatum(val64); + + typeid = INT8OID; + typelen = sizeof(int64); + typebyval = FLOAT8PASSBYVAL; /* int8 and float8 alike */ + } } else { - val = Int64GetDatum(val64); - - typeid = INT8OID; - typelen = sizeof(int64); - typebyval = FLOAT8PASSBYVAL; /* int8 and float8 alike */ + /* arrange to report location if numeric_in() fails */ + setup_parser_errposition_callback(&pcbstate, pstate, aconst->location); + val = DirectFunctionCall3(numeric_in, + CStringGetDatum(aconst->val.fval.fval), + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(-1)); + cancel_parser_errposition_callback(&pcbstate); + + typeid = NUMERICOID; + typelen = -1; /* variable len */ + typebyval = false; } + break; } - else - { - /* arrange to report location if numeric_in() fails */ - setup_parser_errposition_callback(&pcbstate, pstate, aconst->location); - val = DirectFunctionCall3(numeric_in, - CStringGetDatum(aconst->val.fval.fval), - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(-1)); - cancel_parser_errposition_callback(&pcbstate); - - typeid = NUMERICOID; - typelen = -1; /* variable len */ - typebyval = false; - } - break; - } case T_Boolean: val = BoolGetDatum(boolVal(&aconst->val)); diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c index 31a43e034c6..f668abfcb33 100644 --- a/src/backend/parser/parse_param.c +++ b/src/backend/parser/parse_param.c @@ -65,7 +65,7 @@ static bool query_contains_extern_params_walker(Node *node, void *context); */ void setup_parse_fixed_parameters(ParseState *pstate, - const Oid *paramTypes, int numParams) + const Oid *paramTypes, int numParams) { FixedParamState *parstate = palloc(sizeof(FixedParamState)); @@ -81,7 +81,7 @@ setup_parse_fixed_parameters(ParseState *pstate, */ void setup_parse_variable_parameters(ParseState *pstate, - Oid **paramTypes, int *numParams) + Oid **paramTypes, int *numParams) { VarParamState *parstate = palloc(sizeof(VarParamState)); diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c index 5448cb01fa7..00469763e88 100644 --- a/src/backend/parser/parse_relation.c +++ b/src/backend/parser/parse_relation.c @@ -1990,7 +1990,7 @@ addRangeTableEntryForTableFunc(ParseState *pstate, { RangeTblEntry *rte = makeNode(RangeTblEntry); char *refname = alias ? alias->aliasname : - pstrdup(tf->functype == TFT_XMLTABLE ? "xmltable" : "json_table"); + pstrdup(tf->functype == TFT_XMLTABLE ? "xmltable" : "json_table"); Alias *eref; int numaliases; diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c index df2dcbfb99e..8b6e0bd5953 100644 --- a/src/backend/partitioning/partdesc.c +++ b/src/backend/partitioning/partdesc.c @@ -91,8 +91,8 @@ RelationGetPartitionDesc(Relation rel, bool omit_detached) * cached descriptor too. We determine that based on the pg_inherits.xmin * that was saved alongside that descriptor: if the xmin that was not in * progress for that active snapshot is also not in progress for the - * current active snapshot, then we can use it. Otherwise build one - * from scratch. + * current active snapshot, then we can use it. Otherwise build one from + * scratch. 
*/ if (omit_detached && rel->rd_partdesc_nodetached && diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index f36c40e852f..2e146aac93b 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -984,7 +984,8 @@ rebuild_database_list(Oid newdb) hctl.keysize = sizeof(Oid); hctl.entrysize = sizeof(avl_dbase); hctl.hcxt = tmpcxt; - dbhash = hash_create("autovacuum db hash", 20, &hctl, /* magic number here FIXME */ + dbhash = hash_create("autovacuum db hash", 20, &hctl, /* magic number here + * FIXME */ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); /* start by inserting the new database */ @@ -1683,12 +1684,12 @@ AutoVacWorkerMain(int argc, char *argv[]) char dbname[NAMEDATALEN]; /* - * Report autovac startup to the cumulative stats system. We deliberately do - * this before InitPostgres, so that the last_autovac_time will get - * updated even if the connection attempt fails. This is to prevent - * autovac from getting "stuck" repeatedly selecting an unopenable - * database, rather than making any progress on stuff it can connect - * to. + * Report autovac startup to the cumulative stats system. We + * deliberately do this before InitPostgres, so that the + * last_autovac_time will get updated even if the connection attempt + * fails. This is to prevent autovac from getting "stuck" repeatedly + * selecting an unopenable database, rather than making any progress + * on stuff it can connect to. */ pgstat_report_autovac(dbid); diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c index 30682b63b3f..40601aefd97 100644 --- a/src/backend/postmaster/bgworker.c +++ b/src/backend/postmaster/bgworker.c @@ -826,9 +826,9 @@ StartBackgroundWorker(void) /* * Create a per-backend PGPROC struct in shared memory, except in the - * EXEC_BACKEND case where this was done in SubPostmasterMain. We must - * do this before we can use LWLocks (and in the EXEC_BACKEND case we - * already had to do some stuff with LWLocks). + * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do + * this before we can use LWLocks (and in the EXEC_BACKEND case we already + * had to do some stuff with LWLocks). */ #ifndef EXEC_BACKEND InitProcess(); diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c index 8beff4a53cd..25e31c42e16 100644 --- a/src/backend/postmaster/pgarch.c +++ b/src/backend/postmaster/pgarch.c @@ -81,15 +81,14 @@ typedef struct PgArchData int pgprocno; /* pgprocno of archiver process */ /* - * Forces a directory scan in pgarch_readyXlog(). Protected by - * arch_lck. + * Forces a directory scan in pgarch_readyXlog(). Protected by arch_lck. */ bool force_dir_scan; slock_t arch_lck; } PgArchData; -char *XLogArchiveLibrary = ""; +char *XLogArchiveLibrary = ""; /* ---------- @@ -143,7 +142,7 @@ static bool pgarch_readyXlog(char *xlog); static void pgarch_archiveDone(char *xlog); static void pgarch_die(int code, Datum arg); static void HandlePgArchInterrupts(void); -static int ready_file_comparator(Datum a, Datum b, void *arg); +static int ready_file_comparator(Datum a, Datum b, void *arg); static void LoadArchiveLibrary(void); static void call_archive_module_shutdown_callback(int code, Datum arg); @@ -579,13 +578,13 @@ pgarch_readyXlog(char *xlog) /* * If we still have stored file names from the previous directory scan, - * try to return one of those. 
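The PgArchData hunk above documents force_dir_scan as "Protected by arch_lck", i.e. a flag that is only read and written under a spinlock. A sketch of that set/test-and-clear pattern, with a pthread mutex standing in for slock_t (in the backend this would be SpinLockAcquire/SpinLockRelease):

#include <pthread.h>
#include <stdbool.h>

typedef struct
{
    bool        force_dir_scan; /* protected by lck */
    pthread_mutex_t lck;        /* stand-in for slock_t arch_lck */
} ArchData;

/* Any backend can request a fresh directory scan. */
static void
request_dir_scan(ArchData *ad)
{
    pthread_mutex_lock(&ad->lck);
    ad->force_dir_scan = true;
    pthread_mutex_unlock(&ad->lck);
}

/* The archiver consumes the request atomically: test and clear. */
static bool
consume_dir_scan_request(ArchData *ad)
{
    bool        forced;

    pthread_mutex_lock(&ad->lck);
    forced = ad->force_dir_scan;
    ad->force_dir_scan = false;
    pthread_mutex_unlock(&ad->lck);
    return forced;
}

int
main(void)
{
    ArchData    ad = {false, PTHREAD_MUTEX_INITIALIZER};

    request_dir_scan(&ad);
    return consume_dir_scan_request(&ad) ? 0 : 1;
}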
We check to make sure the status file - * is still present, as the archive_command for a previous file may - * have already marked it done. + * try to return one of those. We check to make sure the status file is + * still present, as the archive_command for a previous file may have + * already marked it done. */ while (arch_files->arch_files_size > 0) { - struct stat st; + struct stat st; char status_file[MAXPGPATH]; char *arch_file; @@ -655,8 +654,8 @@ pgarch_readyXlog(char *xlog) CStringGetDatum(basename), NULL) > 0) { /* - * Remove the lowest priority file and add the current one to - * the heap. + * Remove the lowest priority file and add the current one to the + * heap. */ arch_file = DatumGetCString(binaryheap_remove_first(arch_files->arch_heap)); strcpy(arch_file, basename); @@ -677,8 +676,8 @@ pgarch_readyXlog(char *xlog) binaryheap_build(arch_files->arch_heap); /* - * Fill arch_files array with the files to archive in ascending order - * of priority. + * Fill arch_files array with the files to archive in ascending order of + * priority. */ arch_files->arch_files_size = arch_files->arch_heap->bh_size; for (int i = 0; i < arch_files->arch_files_size; i++) @@ -702,10 +701,10 @@ pgarch_readyXlog(char *xlog) static int ready_file_comparator(Datum a, Datum b, void *arg) { - char *a_str = DatumGetCString(a); - char *b_str = DatumGetCString(b); - bool a_history = IsTLHistoryFileName(a_str); - bool b_history = IsTLHistoryFileName(b_str); + char *a_str = DatumGetCString(a); + char *b_str = DatumGetCString(b); + bool a_history = IsTLHistoryFileName(a_str); + bool b_history = IsTLHistoryFileName(b_str); /* Timeline history files always have the highest priority. */ if (a_history != b_history) @@ -793,8 +792,8 @@ HandlePgArchInterrupts(void) if (archiveLibChanged) { /* - * Call the currently loaded archive module's shutdown callback, if - * one is defined. + * Call the currently loaded archive module's shutdown callback, + * if one is defined. */ call_archive_module_shutdown_callback(0, 0); @@ -803,8 +802,8 @@ HandlePgArchInterrupts(void) * load the new one, but there is presently no mechanism for * unloading a library (see the comment above * internal_load_library()). To deal with this, we simply restart - * the archiver. The new archive module will be loaded when the new - * archiver process starts up. + * the archiver. The new archive module will be loaded when the + * new archiver process starts up. */ ereport(LOG, (errmsg("restarting archiver process because value of " @@ -828,9 +827,8 @@ LoadArchiveLibrary(void) memset(&ArchiveContext, 0, sizeof(ArchiveModuleCallbacks)); /* - * If shell archiving is enabled, use our special initialization - * function. Otherwise, load the library and call its - * _PG_archive_module_init(). + * If shell archiving is enabled, use our special initialization function. + * Otherwise, load the library and call its _PG_archive_module_init(). */ if (XLogArchiveLibrary[0] == '\0') archive_init = shell_archive_init; diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 8e61b3471ca..bf591f048d4 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -2859,8 +2859,8 @@ pmdie(SIGNAL_ARGS) /* * If we reached normal running, we go straight to waiting for - * client backends to exit. If already in PM_STOP_BACKENDS or - * a later state, do not change it. + * client backends to exit. If already in PM_STOP_BACKENDS or a + * later state, do not change it. 
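ready_file_comparator above implements a two-level priority: timeline history files beat everything, and ties fall back to plain name order, which for WAL segment names means oldest first. The same ordering as a qsort() callback (the real code feeds a similar comparison to a binaryheap), with a crude suffix test standing in for IsTLHistoryFileName:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool
is_tl_history(const char *name)     /* crude stand-in */
{
    size_t      len = strlen(name);

    return len > 8 && strcmp(name + len - 8, ".history") == 0;
}

static int
ready_file_cmp(const void *a, const void *b)
{
    const char *a_str = *(const char *const *) a;
    const char *b_str = *(const char *const *) b;
    bool        a_history = is_tl_history(a_str);
    bool        b_history = is_tl_history(b_str);

    if (a_history != b_history)
        return a_history ? -1 : 1;  /* history files sort first */
    return strcmp(a_str, b_str);    /* then oldest names first */
}

int
main(void)
{
    const char *files[] = {
        "000000010000000000000002",
        "00000002.history",
        "000000010000000000000001",
    };

    qsort(files, 3, sizeof(files[0]), ready_file_cmp);
    for (int i = 0; i < 3; i++)
        printf("%s\n", files[i]);   /* history file prints first */
    return 0;
}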
*/ if (pmState == PM_RUN || pmState == PM_HOT_STANDBY) connsAllowed = false; diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c index 29cf8f18e1a..f99186eab7d 100644 --- a/src/backend/postmaster/startup.c +++ b/src/backend/postmaster/startup.c @@ -75,7 +75,7 @@ static volatile sig_atomic_t startup_progress_timer_expired = false; /* * Time between progress updates for long-running startup operations. */ -int log_startup_progress_interval = 10000; /* 10 sec */ +int log_startup_progress_interval = 10000; /* 10 sec */ /* Signal handlers */ static void StartupProcTriggerHandler(SIGNAL_ARGS); diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c index 77aebb244cb..e926f8c27c7 100644 --- a/src/backend/postmaster/walwriter.c +++ b/src/backend/postmaster/walwriter.c @@ -297,9 +297,9 @@ HandleWalWriterInterrupts(void) /* * Force reporting remaining WAL statistics at process exit. * - * Since pgstat_report_wal is invoked with 'force' is false in main loop - * to avoid overloading the cumulative stats system, there may exist - * unreported stats counters for the WAL writer. + * Since pgstat_report_wal is invoked with 'force' is false in main + * loop to avoid overloading the cumulative stats system, there may + * exist unreported stats counters for the WAL writer. */ pgstat_report_wal(true); diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c index 6e84f42cb24..e1f9df09180 100644 --- a/src/backend/regex/regc_pg_locale.c +++ b/src/backend/regex/regc_pg_locale.c @@ -234,8 +234,8 @@ pg_set_regex_collation(Oid collation) if (!OidIsValid(collation)) { /* - * This typically means that the parser could not resolve a - * conflict of implicit collations, so report it that way. + * This typically means that the parser could not resolve a conflict + * of implicit collations, so report it that way. */ ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_COLLATION), @@ -253,9 +253,9 @@ pg_set_regex_collation(Oid collation) else { /* - * NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T; - * the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not - * have to be considered below. + * NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T; the + * case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not have to + * be considered below. */ pg_regex_locale = pg_newlocale_from_collation(collation); diff --git a/src/backend/replication/backup_manifest.c b/src/backend/replication/backup_manifest.c index 7e22f9e48cf..d47ab4c41e3 100644 --- a/src/backend/replication/backup_manifest.c +++ b/src/backend/replication/backup_manifest.c @@ -312,7 +312,7 @@ AddWALInfoToBackupManifest(backup_manifest_info *manifest, XLogRecPtr startptr, * Finalize the backup manifest, and send it to the client. */ void -SendBackupManifest(backup_manifest_info *manifest, bbsink * sink) +SendBackupManifest(backup_manifest_info *manifest, bbsink *sink) { uint8 checksumbuf[PG_SHA256_DIGEST_LENGTH]; char checksumstringbuf[PG_SHA256_DIGEST_STRING_LENGTH]; diff --git a/src/backend/replication/basebackup_copy.c b/src/backend/replication/basebackup_copy.c index 90daeff09ce..cabb0772402 100644 --- a/src/backend/replication/basebackup_copy.c +++ b/src/backend/replication/basebackup_copy.c @@ -124,18 +124,18 @@ bbsink_copystream_begin_backup(bbsink *sink) { bbsink_copystream *mysink = (bbsink_copystream *) sink; bbsink_state *state = sink->bbs_state; - char *buf; + char *buf; /* * Initialize buffer. 
We ultimately want to send the archive and manifest * data by means of CopyData messages where the payload portion of each * message begins with a type byte. However, basebackup.c expects the * buffer to be aligned, so we can't just allocate one extra byte for the - * type byte. Instead, allocate enough extra bytes that the portion of - * the buffer we reveal to our callers can be aligned, while leaving room - * to slip the type byte in just beforehand. That will allow us to ship - * the data with a single call to pq_putmessage and without needing any - * extra copying. + * type byte. Instead, allocate enough extra bytes that the portion of the + * buffer we reveal to our callers can be aligned, while leaving room to + * slip the type byte in just beforehand. That will allow us to ship the + * data with a single call to pq_putmessage and without needing any extra + * copying. */ buf = palloc(mysink->base.bbs_buffer_length + MAXIMUM_ALIGNOF); mysink->msgbuffer = buf + (MAXIMUM_ALIGNOF - 1); diff --git a/src/backend/replication/basebackup_gzip.c b/src/backend/replication/basebackup_gzip.c index 44f28ceba45..ef2b954946a 100644 --- a/src/backend/replication/basebackup_gzip.c +++ b/src/backend/replication/basebackup_gzip.c @@ -68,7 +68,7 @@ bbsink_gzip_new(bbsink *next, pg_compress_specification *compress) return NULL; /* keep compiler quiet */ #else bbsink_gzip *sink; - int compresslevel; + int compresslevel; Assert(next != NULL); @@ -118,8 +118,8 @@ static void bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name) { bbsink_gzip *mysink = (bbsink_gzip *) sink; - char *gz_archive_name; - z_stream *zs = &mysink->zstream; + char *gz_archive_name; + z_stream *zs = &mysink->zstream; /* Initialize compressor object. */ memset(zs, 0, sizeof(z_stream)); @@ -129,10 +129,10 @@ bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name) zs->avail_out = sink->bbs_next->bbs_buffer_length; /* - * We need to use deflateInit2() rather than deflateInit() here so that - * we can request a gzip header rather than a zlib header. Otherwise, we - * want to supply the same values that would have been used by default - * if we had just called deflateInit(). + * We need to use deflateInit2() rather than deflateInit() here so that we + * can request a gzip header rather than a zlib header. Otherwise, we want + * to supply the same values that would have been used by default if we + * had just called deflateInit(). * * Per the documentation for deflateInit2, the third argument must be * Z_DEFLATED; the fourth argument is the number of "window bits", by @@ -147,9 +147,8 @@ bbsink_gzip_begin_archive(bbsink *sink, const char *archive_name) errmsg("could not initialize compression library")); /* - * Add ".gz" to the archive name. Note that the pg_basebackup -z - * produces archives named ".tar.gz" rather than ".tgz", so we match - * that here. + * Add ".gz" to the archive name. Note that the pg_basebackup -z produces + * archives named ".tar.gz" rather than ".tgz", so we match that here. */ gz_archive_name = psprintf("%s.gz", archive_name); Assert(sink->bbs_next != NULL); @@ -172,7 +171,7 @@ static void bbsink_gzip_archive_contents(bbsink *sink, size_t len) { bbsink_gzip *mysink = (bbsink_gzip *) sink; - z_stream *zs = &mysink->zstream; + z_stream *zs = &mysink->zstream; /* Compress data from input buffer. 
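The comment above describes the buffer-alignment trick in bbsink_copystream_begin_backup: over-allocate by MAXIMUM_ALIGNOF, expose an aligned buffer to callers, and reserve the byte just before it for the CopyData type byte so one contiguous write can ship both. A sketch of the pointer arithmetic, with malloc standing in for palloc and MAXIMUM_ALIGNOF hardcoded to a typical value:

#include <stdio.h>
#include <stdlib.h>

#define MAXIMUM_ALIGNOF 8       /* assumption; configure computes this */

int
main(void)
{
    size_t      buffer_length = 32768;
    char       *raw = malloc(buffer_length + MAXIMUM_ALIGNOF);
    char       *msgbuffer;
    char       *buffer;

    if (raw == NULL)
        return 1;
    msgbuffer = raw + (MAXIMUM_ALIGNOF - 1);    /* type byte slot */
    buffer = raw + MAXIMUM_ALIGNOF;     /* aligned, revealed to callers */

    buffer[0] = 'x';            /* callers fill the aligned region... */
    msgbuffer[0] = 'd';         /* ...the type byte slips in just before,
                                 * so both go out in one pq_putmessage-style
                                 * write with no extra copy */
    printf("buffer %p, message starts at %p\n",
           (void *) buffer, (void *) msgbuffer);
    free(raw);
    return 0;
}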
*/ zs->next_in = (uint8 *) mysink->base.bbs_buffer; @@ -180,7 +179,7 @@ bbsink_gzip_archive_contents(bbsink *sink, size_t len) while (zs->avail_in > 0) { - int res; + int res; /* Write output data into unused portion of output buffer. */ Assert(mysink->bytes_written < mysink->base.bbs_next->bbs_buffer_length); @@ -230,7 +229,7 @@ static void bbsink_gzip_end_archive(bbsink *sink) { bbsink_gzip *mysink = (bbsink_gzip *) sink; - z_stream *zs = &mysink->zstream; + z_stream *zs = &mysink->zstream; /* There is no more data available. */ zs->next_in = (uint8 *) mysink->base.bbs_buffer; @@ -238,7 +237,7 @@ bbsink_gzip_end_archive(bbsink *sink) while (1) { - int res; + int res; /* Write output data into unused portion of output buffer. */ Assert(mysink->bytes_written < mysink->base.bbs_next->bbs_buffer_length); @@ -248,8 +247,8 @@ bbsink_gzip_end_archive(bbsink *sink) mysink->base.bbs_next->bbs_buffer_length - mysink->bytes_written; /* - * As bbsink_gzip_archive_contents, but pass Z_FINISH since there - * is no more input. + * As bbsink_gzip_archive_contents, but pass Z_FINISH since there is + * no more input. */ res = deflate(zs, Z_FINISH); if (res == Z_STREAM_ERROR) @@ -260,8 +259,8 @@ bbsink_gzip_end_archive(bbsink *sink) mysink->base.bbs_next->bbs_buffer_length - zs->avail_out; /* - * Apparently we had no data in the output buffer and deflate() - * was not able to add any. We must be done. + * Apparently we had no data in the output buffer and deflate() was + * not able to add any. We must be done. */ if (mysink->bytes_written == 0) break; diff --git a/src/backend/replication/basebackup_lz4.c b/src/backend/replication/basebackup_lz4.c index 65e774eff62..c9d19b6c448 100644 --- a/src/backend/replication/basebackup_lz4.c +++ b/src/backend/replication/basebackup_lz4.c @@ -68,7 +68,7 @@ bbsink_lz4_new(bbsink *next, pg_compress_specification *compress) return NULL; /* keep compiler quiet */ #else bbsink_lz4 *sink; - int compresslevel; + int compresslevel; Assert(next != NULL); diff --git a/src/backend/replication/basebackup_server.c b/src/backend/replication/basebackup_server.c index 54e6829d2be..9b4847d90cc 100644 --- a/src/backend/replication/basebackup_server.c +++ b/src/backend/replication/basebackup_server.c @@ -77,10 +77,11 @@ bbsink_server_new(bbsink *next, char *pathname) /* * It's not a good idea to store your backups in the same directory that - * you're backing up. If we allowed a relative path here, that could easily - * happen accidentally, so we don't. The user could still accomplish the - * same thing by including the absolute path to $PGDATA in the pathname, - * but that's likely an intentional bad decision rather than an accident. + * you're backing up. If we allowed a relative path here, that could + * easily happen accidentally, so we don't. The user could still + * accomplish the same thing by including the absolute path to $PGDATA in + * the pathname, but that's likely an intentional bad decision rather than + * an accident. */ if (!is_absolute_path(pathname)) ereport(ERROR, @@ -90,14 +91,15 @@ bbsink_server_new(bbsink *next, char *pathname) switch (pg_check_dir(pathname)) { case 0: + /* - * Does not exist, so create it using the same permissions we'd use - * for a new subdirectory of the data directory itself. + * Does not exist, so create it using the same permissions we'd + * use for a new subdirectory of the data directory itself. 
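The bbsink_gzip hunks above hinge on deflateInit2(): adding 16 to the 15 window bits asks zlib for a gzip header rather than a zlib header, while the remaining arguments keep plain deflateInit() defaults. A minimal, compilable illustration (link with -lz):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

static int
init_gzip_stream(z_stream *zs, int level)
{
    memset(zs, 0, sizeof(*zs));
    return deflateInit2(zs, level,
                        Z_DEFLATED,         /* only supported method */
                        15 + 16,            /* 15 window bits + gzip flag */
                        8,                  /* deflateInit()'s memLevel */
                        Z_DEFAULT_STRATEGY);
}

int
main(void)
{
    z_stream    zs;

    if (init_gzip_stream(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
    {
        fprintf(stderr, "could not initialize compression library\n");
        return 1;
    }
    deflateEnd(&zs);
    return 0;
}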
*/ if (MakePGDirectory(pathname) < 0) ereport(ERROR, - (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", pathname))); + (errcode_for_file_access(), + errmsg("could not create directory \"%s\": %m", pathname))); break; case 1: diff --git a/src/backend/replication/basebackup_target.c b/src/backend/replication/basebackup_target.c index 243a2bacfef..9f73457320e 100644 --- a/src/backend/replication/basebackup_target.c +++ b/src/backend/replication/basebackup_target.c @@ -80,9 +80,9 @@ BaseBackupAddTarget(char *name, /* * We found one, so update it. * - * It is probably not a great idea to call BaseBackupAddTarget - * for the same name multiple times, but if it happens, this - * seems like the sanest behavior. + * It is probably not a great idea to call BaseBackupAddTarget for + * the same name multiple times, but if it happens, this seems + * like the sanest behavior. */ ttype->check_detail = check_detail; ttype->get_sink = get_sink; @@ -91,9 +91,9 @@ BaseBackupAddTarget(char *name, } /* - * We use TopMemoryContext for allocations here to make sure that the - * data we need doesn't vanish under us; that's also why we copy the - * target name into a newly-allocated chunk of memory. + * We use TopMemoryContext for allocations here to make sure that the data + * we need doesn't vanish under us; that's also why we copy the target + * name into a newly-allocated chunk of memory. */ oldcontext = MemoryContextSwitchTo(TopMemoryContext); ttype = palloc(sizeof(BaseBackupTargetType)); diff --git a/src/backend/replication/basebackup_zstd.c b/src/backend/replication/basebackup_zstd.c index d767b26f4e3..b23a37b29ed 100644 --- a/src/backend/replication/basebackup_zstd.c +++ b/src/backend/replication/basebackup_zstd.c @@ -108,9 +108,9 @@ bbsink_zstd_begin_backup(bbsink *sink) if ((compress->options & PG_COMPRESSION_OPTION_WORKERS) != 0) { /* - * On older versions of libzstd, this option does not exist, and trying - * to set it will fail. Similarly for newer versions if they are - * compiled without threading support. + * On older versions of libzstd, this option does not exist, and + * trying to set it will fail. Similarly for newer versions if they + * are compiled without threading support. */ ret = ZSTD_CCtx_setParameter(mysink->cctx, ZSTD_c_nbWorkers, compress->workers); diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c index 6303647fe0f..aa2427ba73f 100644 --- a/src/backend/replication/logical/decode.c +++ b/src/backend/replication/logical/decode.c @@ -92,7 +92,7 @@ LogicalDecodingProcessRecord(LogicalDecodingContext *ctx, XLogReaderState *recor { XLogRecordBuffer buf; TransactionId txid; - RmgrData rmgr; + RmgrData rmgr; buf.origptr = ctx->reader->ReadRecPtr; buf.endptr = ctx->reader->EndRecPtr; diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index 0adb2d1d665..6a4b2d43063 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -344,9 +344,9 @@ retry: } /* - * We don't allow to invoke more sync workers once we have reached the sync - * worker limit per subscription. So, just return silently as we might get - * here because of an otherwise harmless race condition. + * We don't allow to invoke more sync workers once we have reached the + * sync worker limit per subscription. So, just return silently as we + * might get here because of an otherwise harmless race condition. 
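The bbsink_zstd_begin_backup hunk above notes that ZSTD_c_nbWorkers may simply be unsupported: older libzstd lacks the option, and newer builds compiled without threading reject it. The safe pattern is to check the return code rather than assume success (link with -lzstd):

#include <stdio.h>
#include <zstd.h>

int
main(void)
{
    ZSTD_CCtx  *cctx = ZSTD_createCCtx();
    size_t      ret;

    if (cctx == NULL)
        return 1;

    /* Fails cleanly when libzstd is old or built without threads. */
    ret = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
    if (ZSTD_isError(ret))
        fprintf(stderr, "no multithreaded compression: %s\n",
                ZSTD_getErrorName(ret));

    ZSTD_freeCCtx(cctx);
    return 0;
}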
*/ if (OidIsValid(relid) && nsyncworkers >= max_sync_workers_per_subscription) { diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index 6887dc23f61..da7bd1321cb 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -646,8 +646,8 @@ ReorderBufferTXNByXid(ReorderBuffer *rb, TransactionId xid, bool create, } /* - * If the cache wasn't hit or it yielded a "does-not-exist" and we want - * to create an entry. + * If the cache wasn't hit or it yielded a "does-not-exist" and we want to + * create an entry. */ /* search the lookup table */ diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c index 49ceec3bdc8..61aee61b8ee 100644 --- a/src/backend/replication/logical/tablesync.c +++ b/src/backend/replication/logical/tablesync.c @@ -786,11 +786,11 @@ fetch_remote_table_info(char *nspname, char *relname, /* * Fetch info about column lists for the relation (from all the - * publications). We unnest the int2vector values, because that - * makes it easier to combine lists by simply adding the attnums - * to a new bitmap (without having to parse the int2vector data). - * This preserves NULL values, so that if one of the publications - * has no column list, we'll know that. + * publications). We unnest the int2vector values, because that makes + * it easier to combine lists by simply adding the attnums to a new + * bitmap (without having to parse the int2vector data). This + * preserves NULL values, so that if one of the publications has no + * column list, we'll know that. */ resetStringInfo(&cmd); appendStringInfo(&cmd, @@ -816,15 +816,15 @@ fetch_remote_table_info(char *nspname, char *relname, nspname, relname, pubres->err))); /* - * Merge the column lists (from different publications) by creating - * a single bitmap with all the attnums. If we find a NULL value, - * that means one of the publications has no column list for the - * table we're syncing. + * Merge the column lists (from different publications) by creating a + * single bitmap with all the attnums. If we find a NULL value, that + * means one of the publications has no column list for the table + * we're syncing. */ slot = MakeSingleTupleTableSlot(pubres->tupledesc, &TTSOpsMinimalTuple); while (tuplestore_gettupleslot(pubres->tuplestore, true, false, slot)) { - Datum cfval = slot_getattr(slot, 1, &isnull); + Datum cfval = slot_getattr(slot, 1, &isnull); /* NULL means empty column list, so we're done. */ if (isnull) @@ -835,7 +835,7 @@ fetch_remote_table_info(char *nspname, char *relname, } included_cols = bms_add_member(included_cols, - DatumGetInt16(cfval)); + DatumGetInt16(cfval)); ExecClearTuple(slot); } @@ -1056,8 +1056,8 @@ copy_table(Relation rel) quote_qualified_identifier(lrel.nspname, lrel.relname)); /* - * XXX Do we need to list the columns in all cases? Maybe we're replicating - * all columns? + * XXX Do we need to list the columns in all cases? Maybe we're + * replicating all columns? */ for (int i = 0; i < lrel.natts; i++) { @@ -1321,10 +1321,10 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos) /* * COPY FROM does not honor RLS policies. That is not a problem for - * subscriptions owned by roles with BYPASSRLS privilege (or superuser, who - * has it implicitly), but other roles should not be able to circumvent - * RLS. Disallow logical replication into RLS enabled relations for such - * roles. 
+ * subscriptions owned by roles with BYPASSRLS privilege (or superuser, + * who has it implicitly), but other roles should not be able to + * circumvent RLS. Disallow logical replication into RLS enabled + * relations for such roles. */ if (check_enable_rls(RelationGetRelid(rel), InvalidOid, false) == RLS_ENABLED) ereport(ERROR, diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 7da7823c352..725a21b55ec 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -1608,8 +1608,8 @@ GetRelationIdentityOrPK(Relation rel) static void TargetPrivilegesCheck(Relation rel, AclMode mode) { - Oid relid; - AclResult aclresult; + Oid relid; + AclResult aclresult; relid = RelationGetRelid(rel); aclresult = pg_class_aclcheck(relid, GetUserId(), mode); diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 406ad84e1d6..42c06af2391 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -174,8 +174,8 @@ typedef struct RelationSyncEntry Bitmapset *columns; /* - * Private context to store additional data for this entry - state for - * the row filter expressions, column list, etc. + * Private context to store additional data for this entry - state for the + * row filter expressions, column list, etc. */ MemoryContext entry_cxt; } RelationSyncEntry; @@ -206,9 +206,8 @@ typedef struct RelationSyncEntry */ typedef struct PGOutputTxnData { - bool sent_begin_txn; /* flag indicating whether BEGIN has - * been sent */ -} PGOutputTxnData; + bool sent_begin_txn; /* flag indicating whether BEGIN has been sent */ +} PGOutputTxnData; /* Map used to remember which relation schemas we sent. */ static HTAB *RelationSyncCache = NULL; @@ -511,9 +510,9 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, * using bandwidth on something with little/no use for logical replication. */ static void -pgoutput_begin_txn(LogicalDecodingContext * ctx, ReorderBufferTXN * txn) +pgoutput_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { - PGOutputTxnData *txndata = MemoryContextAllocZero(ctx->context, + PGOutputTxnData *txndata = MemoryContextAllocZero(ctx->context, sizeof(PGOutputTxnData)); txn->output_plugin_private = txndata; @@ -987,7 +986,8 @@ pgoutput_column_list_init(PGOutputData *data, List *publications, * * All the given publication-table mappings must be checked. * - * Multiple publications might have multiple column lists for this relation. + * Multiple publications might have multiple column lists for this + * relation. * * FOR ALL TABLES and FOR ALL TABLES IN SCHEMA implies "don't use column * list" so it takes precedence. @@ -1005,8 +1005,9 @@ pgoutput_column_list_init(PGOutputData *data, List *publications, bool pub_no_list = true; /* - * If the publication is FOR ALL TABLES then it is treated the same as if - * there are no column lists (even if other publications have a list). + * If the publication is FOR ALL TABLES then it is treated the same as + * if there are no column lists (even if other publications have a + * list). */ if (!pub->alltables) { @@ -1014,8 +1015,8 @@ pgoutput_column_list_init(PGOutputData *data, List *publications, * Check for the presence of a column list in this publication. * * Note: If we find no pg_publication_rel row, it's a publication - * defined for a whole schema, so it can't have a column list, just - * like a FOR ALL TABLES publication. 
+ * defined for a whole schema, so it can't have a column list, + * just like a FOR ALL TABLES publication. */ cftuple = SearchSysCache2(PUBLICATIONRELMAP, ObjectIdGetDatum(entry->publish_as_relid), @@ -1221,9 +1222,9 @@ pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot, * For updates, we can have only a new tuple when none of the replica * identity columns changed and none of those columns have external data * but we still need to evaluate the row filter for the new tuple as the - * existing values of those columns might not match the filter. Also, users - * can use constant expressions in the row filter, so we anyway need to - * evaluate it for the new tuple. + * existing values of those columns might not match the filter. Also, + * users can use constant expressions in the row filter, so we anyway need + * to evaluate it for the new tuple. * * For deletes, we only have the old tuple. */ @@ -1674,8 +1675,7 @@ pgoutput_message(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, xid = txn->xid; /* - * Output BEGIN if we haven't yet. Avoid for non-transactional - * messages. + * Output BEGIN if we haven't yet. Avoid for non-transactional messages. */ if (transactional) { @@ -2079,15 +2079,15 @@ get_rel_sync_entry(PGOutputData *data, Relation relation) /* * Under what relid should we publish changes in this publication? - * We'll use the top-most relid across all publications. Also track - * the ancestor level for this publication. + * We'll use the top-most relid across all publications. Also + * track the ancestor level for this publication. */ - Oid pub_relid = relid; - int ancestor_level = 0; + Oid pub_relid = relid; + int ancestor_level = 0; /* - * If this is a FOR ALL TABLES publication, pick the partition root - * and set the ancestor level accordingly. + * If this is a FOR ALL TABLES publication, pick the partition + * root and set the ancestor level accordingly. */ if (pub->alltables) { @@ -2156,18 +2156,18 @@ get_rel_sync_entry(PGOutputData *data, Relation relation) /* * We want to publish the changes as the top-most ancestor - * across all publications. So we need to check if the - * already calculated level is higher than the new one. If - * yes, we can ignore the new value (as it's a child). - * Otherwise the new value is an ancestor, so we keep it. + * across all publications. So we need to check if the already + * calculated level is higher than the new one. If yes, we can + * ignore the new value (as it's a child). Otherwise the new + * value is an ancestor, so we keep it. */ if (publish_ancestor_level > ancestor_level) continue; /* - * If we found an ancestor higher up in the tree, discard - * the list of publications through which we replicate it, - * and use the new ancestor. + * If we found an ancestor higher up in the tree, discard the + * list of publications through which we replicate it, and use + * the new ancestor. */ if (publish_ancestor_level < ancestor_level) { diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 5c778f5333b..e5c2102bcd5 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -504,8 +504,8 @@ retry: MyReplicationSlot = s; /* - * The call to pgstat_acquire_replslot() protects against stats for - * a different slot, from before a restart or such, being present during + * The call to pgstat_acquire_replslot() protects against stats for a + * different slot, from before a restart or such, being present during * pgstat_report_replslot(). 
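The get_rel_sync_entry hunks above settle which ancestor a partition's changes are published as: keep the highest ancestor level seen across publications, ignore lower (child) levels, and restart the accumulated publication list whenever a higher ancestor appears. A toy reduction of that rule, with invented types and a counter in place of the real publication list:

#include <stdio.h>

typedef struct
{
    int         level;          /* highest ancestor level so far */
    int         npubs;          /* publications kept for that ancestor */
} AncestorPick;

static void
consider(AncestorPick *pick, int ancestor_level)
{
    if (pick->level > ancestor_level)
        return;                 /* new value is a child; ignore it */
    if (pick->level < ancestor_level)
    {
        pick->level = ancestor_level;   /* higher ancestor found... */
        pick->npubs = 0;        /* ...discard what the old one collected */
    }
    pick->npubs++;              /* replicate through this publication too */
}

int
main(void)
{
    AncestorPick pick = {0, 0};

    consider(&pick, 1);
    consider(&pick, 2);         /* supersedes level 1 */
    consider(&pick, 2);         /* same ancestor, another publication */
    printf("level %d via %d publication(s)\n", pick.level, pick.npubs);
    return 0;
}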
*/ if (SlotIsLogical(s)) diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index 3c9411e2213..b369d28a806 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -1406,9 +1406,9 @@ pg_stat_get_wal_receiver(PG_FUNCTION_ARGS) if (!has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS)) { /* - * Only superusers and roles with privileges of pg_read_all_stats - * can see details. Other users only get the pid value to know whether - * it is a WAL receiver, but no details. + * Only superusers and roles with privileges of pg_read_all_stats can + * see details. Other users only get the pid value to know whether it + * is a WAL receiver, but no details. */ MemSet(&nulls[1], true, sizeof(bool) * (tupdesc->natts - 1)); } diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c index c6c196b2fab..e42671722a8 100644 --- a/src/backend/replication/walsender.c +++ b/src/backend/replication/walsender.c @@ -1505,9 +1505,9 @@ WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId * When skipping empty transactions in synchronous replication, we send a * keepalive message to avoid delaying such transactions. * - * It is okay to check sync_standbys_defined flag without lock here as - * in the worst case we will just send an extra keepalive message when it - * is really not required. + * It is okay to check sync_standbys_defined flag without lock here as in + * the worst case we will just send an extra keepalive message when it is + * really not required. */ if (skipped_xact && SyncRepRequested() && diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c index b6f31849616..c1c27e67d47 100644 --- a/src/backend/statistics/dependencies.c +++ b/src/backend/statistics/dependencies.c @@ -354,7 +354,7 @@ statext_dependencies_build(StatsBuildData *data) /* result */ MVDependencies *dependencies = NULL; - MemoryContext cxt; + MemoryContext cxt; Assert(data->nattnums >= 2); diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index e02ea3a977c..ae13011d275 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -673,9 +673,8 @@ ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, { /* * It's now safe to pin the buffer. We can't pin first and ask - * questions later, because it might confuse code paths - * like InvalidateBuffer() if we pinned a random non-matching - * buffer. + * questions later, because it might confuse code paths like + * InvalidateBuffer() if we pinned a random non-matching buffer. */ if (have_private_ref) PinBuffer(bufHdr, NULL); /* bump pin count */ @@ -2945,10 +2944,10 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum) if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind)) { /* - * Not every table AM uses BLCKSZ wide fixed size blocks. - * Therefore tableam returns the size in bytes - but for the - * purpose of this routine, we want the number of blocks. - * Therefore divide, rounding up. + * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore + * tableam returns the size in bytes - but for the purpose of this + * routine, we want the number of blocks. Therefore divide, rounding + * up. 
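The RelationGetNumberOfBlocksInFork hunk above converts a byte size reported by the table AM into whole blocks by dividing and rounding up. The idiom, standalone, with BLCKSZ fixed at the default 8 kB:

#include <stdint.h>
#include <stdio.h>

#define BLCKSZ 8192             /* assumption: default build value */

int
main(void)
{
    uint64_t    szbytes = 8193; /* one byte past a block boundary */
    uint64_t    nblocks = (szbytes + (BLCKSZ - 1)) / BLCKSZ;

    printf("%llu bytes -> %llu blocks\n",
           (unsigned long long) szbytes,
           (unsigned long long) nblocks);   /* prints 2, not 1 */
    return 0;
}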
*/ uint64 szbytes; @@ -2958,7 +2957,7 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum) } else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) { - return smgrnblocks(RelationGetSmgr(relation), forkNum); + return smgrnblocks(RelationGetSmgr(relation), forkNum); } else Assert(false); @@ -3707,9 +3706,9 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum, BufferAccessStrategy bstrategy_dst; /* - * In general, we want to write WAL whenever wal_level > 'minimal', but - * we can skip it when copying any fork of an unlogged relation other - * than the init fork. + * In general, we want to write WAL whenever wal_level > 'minimal', but we + * can skip it when copying any fork of an unlogged relation other than + * the init fork. */ use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM); @@ -3779,9 +3778,9 @@ void CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode, bool permanent) { - Relation src_rel; - Relation dst_rel; - char relpersistence; + Relation src_rel; + Relation dst_rel; + char relpersistence; /* Set the relpersistence. */ relpersistence = permanent ? @@ -3789,9 +3788,9 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode, /* * We can't use a real relcache entry for a relation in some other - * database, but since we're only going to access the fields related - * to physical storage, a fake one is good enough. If we didn't do this - * and used the smgr layer directly, we would have to worry about + * database, but since we're only going to access the fields related to + * physical storage, a fake one is good enough. If we didn't do this and + * used the smgr layer directly, we would have to worry about * invalidations. */ src_rel = CreateFakeRelcacheEntry(src_rnode); diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index 25c310f6757..ca22336e35d 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -1172,8 +1172,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running) * * We have to sort them logically, because in KnownAssignedXidsAdd we * call TransactionIdFollowsOrEquals and so on. But we know these XIDs - * come from RUNNING_XACTS, which means there are only normal XIDs from - * the same epoch, so this is safe. + * come from RUNNING_XACTS, which means there are only normal XIDs + * from the same epoch, so this is safe. */ qsort(xids, nxids, sizeof(TransactionId), xidLogicalComparator); diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c index 603cf9b0fa7..6139c622e0b 100644 --- a/src/backend/storage/ipc/shm_mq.c +++ b/src/backend/storage/ipc/shm_mq.c @@ -534,9 +534,9 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait, } /* - * If the caller has requested force flush or we have written more than 1/4 - * of the ring size, mark it as written in shared memory and notify the - * receiver. + * If the caller has requested force flush or we have written more than + * 1/4 of the ring size, mark it as written in shared memory and notify + * the receiver. 
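The shm_mq_sendv hunk above flushes either on request or once unflushed bytes pass a quarter of the ring, batching receiver notifications without letting pending data pile up. The threshold test is just a shift; a standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool
should_flush(bool force_flush, size_t send_pending, size_t ring_size)
{
    return force_flush || send_pending > (ring_size >> 2);
}

int
main(void)
{
    printf("%d\n", should_flush(false, 1024, 8192));    /* 0: under 1/4 */
    printf("%d\n", should_flush(false, 3000, 8192));    /* 1: over 1/4 */
    return 0;
}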
*/ if (force_flush || mqh->mqh_send_pending > (mq->mq_ring_size >> 2)) { diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c index 2861c03e04b..59310b708fb 100644 --- a/src/backend/storage/ipc/sinvaladt.c +++ b/src/backend/storage/ipc/sinvaladt.c @@ -208,10 +208,11 @@ SInvalShmemSize(void) /* * In Hot Standby mode, the startup process requests a procState array - * slot using InitRecoveryTransactionEnvironment(). Even though MaxBackends - * doesn't account for the startup process, it is guaranteed to get a - * free slot. This is because the autovacuum launcher and worker processes, - * which are included in MaxBackends, are not started in Hot Standby mode. + * slot using InitRecoveryTransactionEnvironment(). Even though + * MaxBackends doesn't account for the startup process, it is guaranteed + * to get a free slot. This is because the autovacuum launcher and worker + * processes, which are included in MaxBackends, are not started in Hot + * Standby mode. */ size = add_size(size, mul_size(sizeof(ProcState), MaxBackends)); diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c index cc15396789b..a3d367db511 100644 --- a/src/backend/storage/page/bufpage.c +++ b/src/backend/storage/page/bufpage.c @@ -795,7 +795,7 @@ PageRepairFragmentation(Page page) if (finalusedlp != nline) { /* The last line pointer is not the last used line pointer */ - int nunusedend = nline - finalusedlp; + int nunusedend = nline - finalusedlp; Assert(nunused >= nunusedend && nunusedend > 0); diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index 304cce135aa..8b6b5bbaaab 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -655,7 +655,7 @@ pg_analyze_and_rewrite_fixedparams(RawStmt *parsetree, ResetUsage(); query = parse_analyze_fixedparams(parsetree, query_string, paramTypes, numParams, - queryEnv); + queryEnv); if (log_parser_stats) ShowUsage("PARSE ANALYSIS STATISTICS"); @@ -694,7 +694,7 @@ pg_analyze_and_rewrite_varparams(RawStmt *parsetree, ResetUsage(); query = parse_analyze_varparams(parsetree, query_string, paramTypes, numParams, - queryEnv); + queryEnv); /* * Check all parameter types got determined. @@ -1164,7 +1164,7 @@ exec_simple_query(const char *query_string) oldcontext = MemoryContextSwitchTo(MessageContext); querytree_list = pg_analyze_and_rewrite_fixedparams(parsetree, query_string, - NULL, 0, NULL); + NULL, 0, NULL); plantree_list = pg_plan_queries(querytree_list, query_string, CURSOR_OPT_PARALLEL_OK, NULL); @@ -4377,11 +4377,12 @@ PostgresMain(const char *dbname, const char *username) * Note: this includes fflush()'ing the last of the prior output. * * This is also a good time to flush out collected statistics to the - * cumulative stats system, and to update the PS stats display. We avoid doing - * those every time through the message loop because it'd slow down - * processing of batched messages, and because we don't want to report - * uncommitted updates (that confuses autovacuum). The notification - * processor wants a call too, if we are not in a transaction block. + * cumulative stats system, and to update the PS stats display. We + * avoid doing those every time through the message loop because it'd + * slow down processing of batched messages, and because we don't want + * to report uncommitted updates (that confuses autovacuum). The + * notification processor wants a call too, if we are not in a + * transaction block. 
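The SInvalShmemSize hunk above sizes the procState array with add_size() and mul_size(), PostgreSQL's overflow-checked size arithmetic (which reports an error instead of silently wrapping). A sketch of that pattern with invented *_checked names that abort rather than ereport:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static size_t
mul_size_checked(size_t a, size_t b)
{
    if (b != 0 && a > SIZE_MAX / b)
    {
        fprintf(stderr, "requested shared memory size overflows size_t\n");
        exit(1);
    }
    return a * b;
}

static size_t
add_size_checked(size_t a, size_t b)
{
    if (a > SIZE_MAX - b)
    {
        fprintf(stderr, "requested shared memory size overflows size_t\n");
        exit(1);
    }
    return a + b;
}

int
main(void)
{
    size_t      size = 1024;    /* whatever was accumulated so far */
    size_t      procstate = 64; /* stand-in for sizeof(ProcState) */
    size_t      backends = 100; /* stand-in for MaxBackends */

    size = add_size_checked(size, mul_size_checked(procstate, backends));
    printf("%zu bytes\n", size);
    return 0;
}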
* * Also, if an idle timeout is enabled, start the timer for that. */ @@ -4415,7 +4416,7 @@ PostgresMain(const char *dbname, const char *username) } else { - long stats_timeout; + long stats_timeout; /* * Process incoming notifies (including self-notifies), if @@ -4470,8 +4471,9 @@ PostgresMain(const char *dbname, const char *username) /* * (4) turn off the idle-in-transaction, idle-session and - * idle-stats-update timeouts if active. We do this before step (5) so - * that any last-moment timeout is certain to be detected in step (5). + * idle-stats-update timeouts if active. We do this before step (5) + * so that any last-moment timeout is certain to be detected in step + * (5). * * At most one of these timeouts will be active, so there's no need to * worry about combining the timeout.c calls into one. diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c index 78e951a6bca..2570e5e6301 100644 --- a/src/backend/utils/adt/arrayfuncs.c +++ b/src/backend/utils/adt/arrayfuncs.c @@ -3996,7 +3996,8 @@ hash_array(PG_FUNCTION_ARGS) /* * Make fake type cache entry structure. Note that we can't just - * modify typentry, since that points directly into the type cache. + * modify typentry, since that points directly into the type + * cache. */ record_typentry = palloc0(sizeof(*record_typentry)); record_typentry->type_id = element_type; diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c index 0576764ac4b..b4a2c8d2197 100644 --- a/src/backend/utils/adt/dbsize.c +++ b/src/backend/utils/adt/dbsize.c @@ -112,8 +112,8 @@ calculate_database_size(Oid dbOid) AclResult aclresult; /* - * User must have connect privilege for target database or have privileges of - * pg_read_all_stats + * User must have connect privilege for target database or have privileges + * of pg_read_all_stats */ aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT); if (aclresult != ACLCHECK_OK && @@ -196,9 +196,9 @@ calculate_tablespace_size(Oid tblspcOid) AclResult aclresult; /* - * User must have privileges of pg_read_all_stats or have CREATE privilege for - * target tablespace, either explicitly granted or implicitly because it - * is default for current database. + * User must have privileges of pg_read_all_stats or have CREATE privilege + * for target tablespace, either explicitly granted or implicitly because + * it is default for current database. 
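Both dbsize.c hunks above (and the tablespace check that follows) reflow the same access rule: either the ordinary privilege on the object or membership in pg_read_all_stats will do. A self-contained sketch of that disjunction; the stubs merely stand in for pg_database_aclcheck() and has_privs_of_role():

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef enum { ACLCHECK_OK, ACLCHECK_NO_PRIV } AclResult;

    /* Stubs so the sketch compiles on its own. */
    static AclResult check_connect_priv(void) { return ACLCHECK_NO_PRIV; }
    static bool member_of_read_all_stats(void) { return true; }

    /*
     * CONNECT privilege on the target database is sufficient, and so is
     * pg_read_all_stats membership; only when both checks fail is the
     * size calculation rejected.
     */
    int
    main(void)
    {
        if (check_connect_priv() != ACLCHECK_OK && !member_of_read_all_stats())
        {
            fprintf(stderr, "permission denied for database\n");
            return EXIT_FAILURE;
        }
        puts("size calculation allowed");
        return EXIT_SUCCESS;
    }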
*/ if (tblspcOid != MyDatabaseTableSpace && !has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS)) diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 97a4544ffc6..e909c1a200c 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -898,7 +898,7 @@ static const KeyWord DCH_keywords[] = { {"month", 5, DCH_month, false, FROM_CHAR_DATE_GREGORIAN}, {"mon", 3, DCH_mon, false, FROM_CHAR_DATE_GREGORIAN}, {"ms", 2, DCH_MS, true, FROM_CHAR_DATE_NONE}, - {"of", 2, DCH_OF, false, FROM_CHAR_DATE_NONE}, /* o */ + {"of", 2, DCH_OF, false, FROM_CHAR_DATE_NONE}, /* o */ {"p.m.", 4, DCH_p_m, false, FROM_CHAR_DATE_NONE}, /* p */ {"pm", 2, DCH_pm, false, FROM_CHAR_DATE_NONE}, {"q", 1, DCH_Q, true, FROM_CHAR_DATE_NONE}, /* q */ @@ -906,7 +906,7 @@ static const KeyWord DCH_keywords[] = { {"sssss", 5, DCH_SSSS, true, FROM_CHAR_DATE_NONE}, /* s */ {"ssss", 4, DCH_SSSS, true, FROM_CHAR_DATE_NONE}, {"ss", 2, DCH_SS, true, FROM_CHAR_DATE_NONE}, - {"tzh", 3, DCH_TZH, false, FROM_CHAR_DATE_NONE}, /* t */ + {"tzh", 3, DCH_TZH, false, FROM_CHAR_DATE_NONE}, /* t */ {"tzm", 3, DCH_TZM, true, FROM_CHAR_DATE_NONE}, {"tz", 2, DCH_tz, false, FROM_CHAR_DATE_NONE}, {"us", 2, DCH_US, true, FROM_CHAR_DATE_NONE}, /* u */ @@ -1675,8 +1675,8 @@ str_tolower(const char *buff, size_t nbytes, Oid collid) if (!OidIsValid(collid)) { /* - * This typically means that the parser could not resolve a - * conflict of implicit collations, so report it that way. + * This typically means that the parser could not resolve a conflict + * of implicit collations, so report it that way. */ ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_COLLATION), @@ -1797,8 +1797,8 @@ str_toupper(const char *buff, size_t nbytes, Oid collid) if (!OidIsValid(collid)) { /* - * This typically means that the parser could not resolve a - * conflict of implicit collations, so report it that way. + * This typically means that the parser could not resolve a conflict + * of implicit collations, so report it that way. */ ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_COLLATION), @@ -1920,8 +1920,8 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) if (!OidIsValid(collid)) { /* - * This typically means that the parser could not resolve a - * conflict of implicit collations, so report it that way. + * This typically means that the parser could not resolve a conflict + * of implicit collations, so report it that way. 
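The comment reflowed three times above (str_tolower, str_toupper, str_initcap) makes one point: an invalid collation OID this deep in execution almost always means the parser gave up on conflicting implicit collations, so the error should name that condition. A compact sketch of the guard; InvalidOid and the message wording follow the server's conventions, the rest is illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned int Oid;
    #define InvalidOid ((Oid) 0)

    /*
     * Collation resolution happened at parse time; an invalid OID here
     * means the parser could not choose between conflicting implicit
     * collations, so report that rather than a generic failure.
     */
    static void
    check_collation_determined(Oid collid, const char *func)
    {
        if (collid == InvalidOid)
        {
            fprintf(stderr,
                    "could not determine which collation to use for %s\n",
                    func);
            exit(EXIT_FAILURE);
        }
    }

    int
    main(void)
    {
        check_collation_determined(InvalidOid, "lower()");
        return 0;
    }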
*/ ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_COLLATION), diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index 63649ba7351..553cc25eb9d 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -44,9 +44,9 @@ typedef struct JsonUniqueHashEntry /* Context for key uniqueness check in builder functions */ typedef struct JsonUniqueBuilderState { - JsonUniqueCheckState check; /* unique check */ + JsonUniqueCheckState check; /* unique check */ StringInfoData skipped_keys; /* skipped keys with NULL values */ - MemoryContext mcxt; /* context for saving skipped keys */ + MemoryContext mcxt; /* context for saving skipped keys */ } JsonUniqueBuilderState; /* Element of object stack for key uniqueness check during json parsing */ @@ -774,10 +774,10 @@ to_json_is_immutable(Oid typoid) return false; case JSONTYPE_ARRAY: - return false; /* TODO recurse into elements */ + return false; /* TODO recurse into elements */ case JSONTYPE_COMPOSITE: - return false; /* TODO recurse into fields */ + return false; /* TODO recurse into fields */ case JSONTYPE_NUMERIC: case JSONTYPE_CAST: @@ -938,7 +938,7 @@ static uint32 json_unique_hash(const void *key, Size keysize) { const JsonUniqueHashEntry *entry = (JsonUniqueHashEntry *) key; - uint32 hash = hash_bytes_uint32(entry->object_id); + uint32 hash = hash_bytes_uint32(entry->object_id); hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len); @@ -1011,6 +1011,7 @@ json_unique_builder_get_skipped_keys(JsonUniqueBuilderState *cxt) if (!out->data) { MemoryContext oldcxt = MemoryContextSwitchTo(cxt->mcxt); + initStringInfo(out); MemoryContextSwitchTo(oldcxt); } @@ -1116,8 +1117,8 @@ json_object_agg_transfn_worker(FunctionCallInfo fcinfo, out = state->str; /* - * Append comma delimiter only if we have already outputted some fields - * after the initial string "{ ". + * Append comma delimiter only if we have already outputted some + * fields after the initial string "{ ". 
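The delimiter rule this json.c comment states, and the `if (out->len > 2)` test just below implements, works because the accumulator is seeded with the two-character prefix "{ ": any greater length proves a field has already been emitted. A standalone toy version:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        char buf[64] = "{ ";    /* seeded with the 2-byte prefix */
        const char *fields[] = {"\"a\" : 1", "\"b\" : 2", "\"c\" : 3"};

        for (int i = 0; i < 3; i++)
        {
            /* longer than the prefix means a field was already added */
            if (strlen(buf) > 2)
                strcat(buf, ", ");
            strcat(buf, fields[i]);
        }
        strcat(buf, " }");
        puts(buf);              /* { "a" : 1, "b" : 2, "c" : 3 } */
        return 0;
    }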
*/ if (out->len > 2) appendStringInfoString(out, ", "); @@ -1285,7 +1286,7 @@ json_build_object_worker(int nargs, Datum *args, bool *nulls, Oid *types, if (nulls[i]) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("argument %d cannot be null", i + 1), + errmsg("argument %d cannot be null", i + 1), errhint("Object keys should be text."))); /* save key offset before key appending */ @@ -1327,6 +1328,7 @@ json_build_object(PG_FUNCTION_ARGS) Datum *args; bool *nulls; Oid *types; + /* build argument values to build the object */ int nargs = extract_variadic_args(fcinfo, 0, true, &args, &types, &nulls); @@ -1382,6 +1384,7 @@ json_build_array(PG_FUNCTION_ARGS) Datum *args; bool *nulls; Oid *types; + /* build argument values to build the object */ int nargs = extract_variadic_args(fcinfo, 0, true, &args, &types, &nulls); @@ -1706,7 +1709,7 @@ json_validate(text *json, bool check_unique_keys, bool throw_error) if (throw_error) json_ereport_error(result, lex); - return false; /* invalid json */ + return false; /* invalid json */ } if (check_unique_keys && !state.unique) @@ -1716,10 +1719,10 @@ json_validate(text *json, bool check_unique_keys, bool throw_error) (errcode(ERRCODE_DUPLICATE_JSON_OBJECT_KEY_VALUE), errmsg("duplicate JSON object key value"))); - return false; /* not unique keys */ + return false; /* not unique keys */ } - return true; /* ok */ + return true; /* ok */ } /* diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c index 26d81366c9f..39355e242d2 100644 --- a/src/backend/utils/adt/jsonb.c +++ b/src/backend/utils/adt/jsonb.c @@ -1148,10 +1148,10 @@ to_jsonb_is_immutable(Oid typoid) return false; case JSONBTYPE_ARRAY: - return false; /* TODO recurse into elements */ + return false; /* TODO recurse into elements */ case JSONBTYPE_COMPOSITE: - return false; /* TODO recurse into fields */ + return false; /* TODO recurse into fields */ case JSONBTYPE_NUMERIC: case JSONBTYPE_JSONCAST: @@ -1240,6 +1240,7 @@ jsonb_build_object(PG_FUNCTION_ARGS) Datum *args; bool *nulls; Oid *types; + /* build argument values to build the object */ int nargs = extract_variadic_args(fcinfo, 0, true, &args, &types, &nulls); @@ -1299,6 +1300,7 @@ jsonb_build_array(PG_FUNCTION_ARGS) Datum *args; bool *nulls; Oid *types; + /* build argument values to build the object */ int nargs = extract_variadic_args(fcinfo, 0, true, &args, &types, &nulls); @@ -2229,7 +2231,7 @@ jsonb_float8(PG_FUNCTION_ARGS) Jsonb * JsonbMakeEmptyArray(void) { - JsonbValue jbv; + JsonbValue jbv; jbv.type = jbvArray; jbv.val.array.elems = NULL; @@ -2245,7 +2247,7 @@ JsonbMakeEmptyArray(void) Jsonb * JsonbMakeEmptyObject(void) { - JsonbValue jbv; + JsonbValue jbv; jbv.type = jbvObject; jbv.val.object.pairs = NULL; @@ -2272,7 +2274,7 @@ JsonbUnquote(Jsonb *jb) return pstrdup(v.val.boolean ? 
"true" : "false"); else if (v.type == jbvNumeric) return DatumGetCString(DirectFunctionCall1(numeric_out, - PointerGetDatum(v.val.numeric))); + PointerGetDatum(v.val.numeric))); else if (v.type == jbvNull) return pstrdup("null"); else diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c index 21d874c098a..5318eda9cfb 100644 --- a/src/backend/utils/adt/jsonb_util.c +++ b/src/backend/utils/adt/jsonb_util.c @@ -1959,7 +1959,8 @@ uniqueifyJsonbObject(JsonbValue *object, bool unique_keys, bool skip_nulls) if (hasNonUniq || skip_nulls) { - JsonbPair *ptr, *res; + JsonbPair *ptr, + *res; while (skip_nulls && object->val.object.nPairs > 0 && object->val.object.pairs->value.type == jbvNull) diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index d1356d64166..d427bdfbe0d 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -3139,7 +3139,7 @@ Datum json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod, void **cache, MemoryContext mcxt, bool *isnull) { - JsValue jsv = { 0 }; + JsValue jsv = {0}; JsonbValue jbv; jsv.is_json = json_type == JSONOID; @@ -3157,7 +3157,8 @@ json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod, jsv.val.json.str = VARDATA_ANY(json); jsv.val.json.len = VARSIZE_ANY_EXHDR(json); - jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */ + jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in + * populate_composite() */ } else { @@ -3174,7 +3175,7 @@ json_populate_type(Datum json_val, Oid json_type, Oid typid, int32 typmod, if (!*cache) *cache = MemoryContextAllocZero(mcxt, sizeof(ColumnIOData)); - return populate_record_field(*cache , typid, typmod, NULL, mcxt, + return populate_record_field(*cache, typid, typmod, NULL, mcxt, PointerGetDatum(NULL), &jsv, isnull); } diff --git a/src/backend/utils/adt/jsonpath.c b/src/backend/utils/adt/jsonpath.c index 0ac14153aae..da9df4ae766 100644 --- a/src/backend/utils/adt/jsonpath.c +++ b/src/backend/utils/adt/jsonpath.c @@ -1094,7 +1094,7 @@ typedef struct JsonPathMutableContext { List *varnames; /* list of variable names */ List *varexprs; /* list of variable expressions */ - JsonPathDatatypeStatus current; /* status of @ item */ + JsonPathDatatypeStatus current; /* status of @ item */ bool lax; /* jsonpath is lax or strict */ bool mutable; /* resulting mutability status */ } JsonPathMutableContext; @@ -1282,18 +1282,18 @@ jspIsMutableWalker(JsonPathItem *jpi, JsonPathMutableContext *cxt) jspIsMutableWalker(&arg, cxt); break; - /* literals */ + /* literals */ case jpiNull: case jpiString: case jpiNumeric: case jpiBool: - /* accessors */ + /* accessors */ case jpiKey: case jpiAnyKey: - /* special items */ + /* special items */ case jpiSubscript: case jpiLast: - /* item methods */ + /* item methods */ case jpiType: case jpiSize: case jpiAbs: diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c index 2544c6b1551..0943a381bac 100644 --- a/src/backend/utils/adt/jsonpath_exec.c +++ b/src/backend/utils/adt/jsonpath_exec.c @@ -288,9 +288,9 @@ static void getJsonPathItem(JsonPathExecContext *cxt, JsonPathItem *item, JsonbValue *value); static void getJsonPathVariable(JsonPathExecContext *cxt, JsonPathItem *variable, JsonbValue *value); -static int getJsonPathVariableFromJsonb(void *varsJsonb, char *varName, - int varNameLen, JsonbValue *val, - JsonbValue *baseObject); +static int getJsonPathVariableFromJsonb(void *varsJsonb, char *varName, + int 
varNameLen, JsonbValue *val, + JsonbValue *baseObject); static int JsonbArraySize(JsonbValue *jb); static JsonPathBool executeComparison(JsonPathItem *cmp, JsonbValue *lv, JsonbValue *rv, void *p); @@ -322,7 +322,7 @@ static int compareDatetime(Datum val1, Oid typid1, Datum val2, Oid typid2, static JsonTableJoinState *JsonTableInitPlanState(JsonTableContext *cxt, - Node *plan, JsonTableScanState *parent); + Node *plan, JsonTableScanState *parent); static bool JsonTableNextRow(JsonTableScanState *scan); @@ -2743,7 +2743,7 @@ static int compareDatetime(Datum val1, Oid typid1, Datum val2, Oid typid2, bool useTz, bool *cast_error) { - PGFunction cmpfunc; + PGFunction cmpfunc; *cast_error = false; @@ -2987,8 +2987,8 @@ JsonPathQuery(Datum jb, JsonPath *jp, JsonWrapper wrapper, bool *empty, JsonbValue * JsonPathValue(Datum jb, JsonPath *jp, bool *empty, bool *error, List *vars) { - JsonbValue *res; - JsonValueList found = { 0 }; + JsonbValue *res; + JsonValueList found = {0}; JsonPathExecResult jper PG_USED_FOR_ASSERTS_ONLY; int count; @@ -3123,8 +3123,8 @@ JsonItemFromDatum(Datum val, Oid typid, int32 typmod, JsonbValue *res) text *txt = DatumGetTextP(val); char *str = text_to_cstring(txt); Jsonb *jb = - DatumGetJsonbP(DirectFunctionCall1(jsonb_in, - CStringGetDatum(str))); + DatumGetJsonbP(DirectFunctionCall1(jsonb_in, + CStringGetDatum(str))); pfree(str); @@ -3221,7 +3221,7 @@ JsonTableInitOpaque(TableFuncScanState *state, int natts) { JsonTableContext *cxt; PlanState *ps = &state->ss.ps; - TableFuncScan *tfs = castNode(TableFuncScan, ps->plan); + TableFuncScan *tfs = castNode(TableFuncScan, ps->plan); TableFunc *tf = tfs->tablefunc; JsonExpr *ci = castNode(JsonExpr, tf->docexpr); JsonTableParent *root = castNode(JsonTableParent, tf->plan); @@ -3298,7 +3298,7 @@ JsonTableResetContextItem(JsonTableScanState *scan, Datum item) { MemoryContext oldcxt; JsonPathExecResult res; - Jsonb *js = (Jsonb *) DatumGetJsonbP(item); + Jsonb *js = (Jsonb *) DatumGetJsonbP(item); JsonValueListClear(&scan->found); @@ -3307,7 +3307,7 @@ JsonTableResetContextItem(JsonTableScanState *scan, Datum item) oldcxt = MemoryContextSwitchTo(scan->mcxt); res = executeJsonPath(scan->path, scan->args, EvalJsonPathVar, js, - scan->errorOnError, &scan->found, false /* FIXME */); + scan->errorOnError, &scan->found, false /* FIXME */ ); MemoryContextSwitchTo(oldcxt); @@ -3369,9 +3369,9 @@ JsonTableNextJoinRow(JsonTableJoinState *state) /* inner rows are exhausted */ if (state->u.join.cross) - state->u.join.advanceRight = false; /* next outer row */ + state->u.join.advanceRight = false; /* next outer row */ else - return false; /* end of scan */ + return false; /* end of scan */ } while (!state->u.join.advanceRight) @@ -3387,7 +3387,7 @@ JsonTableNextJoinRow(JsonTableJoinState *state) JsonTableRescanRecursive(state->u.join.right); if (!JsonTableNextJoinRow(state->u.join.right)) - continue; /* next outer row */ + continue; /* next outer row */ state->u.join.advanceRight = true; /* next inner row */ } @@ -3460,7 +3460,7 @@ JsonTableNextRow(JsonTableScanState *scan) { scan->current = PointerGetDatum(NULL); scan->currentIsNull = true; - return false; /* end of scan */ + return false; /* end of scan */ } /* set current row item */ @@ -3518,12 +3518,12 @@ JsonTableGetValue(TableFuncScanState *state, int colnum, JsonTableScanState *scan = cxt->colexprs[colnum].scan; Datum result; - if (scan->currentIsNull) /* NULL from outer/union join */ + if (scan->currentIsNull) /* NULL from outer/union join */ { result = (Datum) 0; *isnull = true; } 
- else if (estate) /* regular column */ + else if (estate) /* regular column */ { result = ExecEvalExpr(estate, econtext, isnull); } diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c index 833ee8f814c..e02fc3725ad 100644 --- a/src/backend/utils/adt/like.c +++ b/src/backend/utils/adt/like.c @@ -181,8 +181,8 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation) if (!OidIsValid(collation)) { /* - * This typically means that the parser could not resolve a - * conflict of implicit collations, so report it that way. + * This typically means that the parser could not resolve a conflict + * of implicit collations, so report it that way. */ ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_COLLATION), diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c index 67d7d67fb83..da5c7d09069 100644 --- a/src/backend/utils/adt/multirangetypes.c +++ b/src/backend/utils/adt/multirangetypes.c @@ -1439,7 +1439,8 @@ multirange_agg_transfn(PG_FUNCTION_ARGS) if (range_count == 0) { /* - * Add an empty range so we get an empty result (not a null result). + * Add an empty range so we get an empty result (not a null + * result). */ accumArrayResult(state, RangeTypePGetDatum(make_empty_range(rngtypcache)), diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 45547f6ae7f..920a63b0081 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -8537,139 +8537,138 @@ div_var(const NumericVar *var1, const NumericVar *var2, NumericVar *result, alloc_var(result, res_ndigits); res_digits = result->digits; + /* + * The full multiple-place algorithm is taken from Knuth volume 2, + * Algorithm 4.3.1D. + * + * We need the first divisor digit to be >= NBASE/2. If it isn't, make it + * so by scaling up both the divisor and dividend by the factor "d". (The + * reason for allocating dividend[0] above is to leave room for possible + * carry here.) + */ + if (divisor[1] < HALF_NBASE) + { + int d = NBASE / (divisor[1] + 1); + + carry = 0; + for (i = var2ndigits; i > 0; i--) + { + carry += divisor[i] * d; + divisor[i] = carry % NBASE; + carry = carry / NBASE; + } + Assert(carry == 0); + carry = 0; + /* at this point only var1ndigits of dividend can be nonzero */ + for (i = var1ndigits; i >= 0; i--) + { + carry += dividend[i] * d; + dividend[i] = carry % NBASE; + carry = carry / NBASE; + } + Assert(carry == 0); + Assert(divisor[1] >= HALF_NBASE); + } + /* First 2 divisor digits are used repeatedly in main loop */ + divisor1 = divisor[1]; + divisor2 = divisor[2]; + + /* + * Begin the main loop. Each iteration of this loop produces the j'th + * quotient digit by dividing dividend[j .. j + var2ndigits] by the + * divisor; this is essentially the same as the common manual procedure + * for long division. + */ + for (j = 0; j < res_ndigits; j++) + { + /* Estimate quotient digit from the first two dividend digits */ + int next2digits = dividend[j] * NBASE + dividend[j + 1]; + int qhat; + /* - * The full multiple-place algorithm is taken from Knuth volume 2, - * Algorithm 4.3.1D. - * - * We need the first divisor digit to be >= NBASE/2. If it isn't, - * make it so by scaling up both the divisor and dividend by the - * factor "d". (The reason for allocating dividend[0] above is to - * leave room for possible carry here.) + * If next2digits are 0, then quotient digit must be 0 and there's no + * need to adjust the working dividend. It's worth testing here to + * fall out ASAP when processing trailing zeroes in a dividend. 
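The numeric.c hunks above and below only re-indent div_var()'s long-division loop; the algorithm is untouched. Its first step, Knuth's D1 normalization, is easiest to verify on a concrete value. A self-contained base-10000 example that assumes the same digit layout as div_var (digit 0 reserved for carry):

    #include <assert.h>
    #include <stdio.h>

    #define NBASE       10000
    #define HALF_NBASE  5000

    /*
     * Knuth 4.3.1D step D1: scale the divisor (and, in the real code,
     * the dividend too) by d = NBASE / (divisor[1] + 1) so the leading
     * digit is at least NBASE/2.  That bound keeps the quotient-digit
     * estimate in the main loop within two of the true digit.
     */
    int
    main(void)
    {
        int divisor[] = {0, 123, 4567};  /* value 123 * 10000 + 4567 */
        int ndigits = 2;

        if (divisor[1] < HALF_NBASE)
        {
            int d = NBASE / (divisor[1] + 1);
            int carry = 0;

            for (int i = ndigits; i > 0; i--)
            {
                carry += divisor[i] * d;
                divisor[i] = carry % NBASE;
                carry = carry / NBASE;
            }
            assert(carry == 0);          /* d is chosen so nothing spills */
            assert(divisor[1] >= HALF_NBASE);
            printf("scaled by %d: %04d %04d\n", d, divisor[1], divisor[2]);
        }
        return 0;
    }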
*/ - if (divisor[1] < HALF_NBASE) + if (next2digits == 0) { - int d = NBASE / (divisor[1] + 1); - - carry = 0; - for (i = var2ndigits; i > 0; i--) - { - carry += divisor[i] * d; - divisor[i] = carry % NBASE; - carry = carry / NBASE; - } - Assert(carry == 0); - carry = 0; - /* at this point only var1ndigits of dividend can be nonzero */ - for (i = var1ndigits; i >= 0; i--) - { - carry += dividend[i] * d; - dividend[i] = carry % NBASE; - carry = carry / NBASE; - } - Assert(carry == 0); - Assert(divisor[1] >= HALF_NBASE); + res_digits[j] = 0; + continue; } - /* First 2 divisor digits are used repeatedly in main loop */ - divisor1 = divisor[1]; - divisor2 = divisor[2]; + + if (dividend[j] == divisor1) + qhat = NBASE - 1; + else + qhat = next2digits / divisor1; /* - * Begin the main loop. Each iteration of this loop produces the j'th - * quotient digit by dividing dividend[j .. j + var2ndigits] by the - * divisor; this is essentially the same as the common manual - * procedure for long division. + * Adjust quotient digit if it's too large. Knuth proves that after + * this step, the quotient digit will be either correct or just one + * too large. (Note: it's OK to use dividend[j+2] here because we + * know the divisor length is at least 2.) */ - for (j = 0; j < res_ndigits; j++) + while (divisor2 * qhat > + (next2digits - qhat * divisor1) * NBASE + dividend[j + 2]) + qhat--; + + /* As above, need do nothing more when quotient digit is 0 */ + if (qhat > 0) { - /* Estimate quotient digit from the first two dividend digits */ - int next2digits = dividend[j] * NBASE + dividend[j + 1]; - int qhat; + NumericDigit *dividend_j = &dividend[j]; /* - * If next2digits are 0, then quotient digit must be 0 and there's - * no need to adjust the working dividend. It's worth testing - * here to fall out ASAP when processing trailing zeroes in a - * dividend. + * Multiply the divisor by qhat, and subtract that from the + * working dividend. The multiplication and subtraction are + * folded together here, noting that qhat <= NBASE (since it might + * be one too large), and so the intermediate result "tmp_result" + * is in the range [-NBASE^2, NBASE - 1], and "borrow" is in the + * range [0, NBASE]. */ - if (next2digits == 0) + borrow = 0; + for (i = var2ndigits; i >= 0; i--) { - res_digits[j] = 0; - continue; - } + int tmp_result; - if (dividend[j] == divisor1) - qhat = NBASE - 1; - else - qhat = next2digits / divisor1; + tmp_result = dividend_j[i] - borrow - divisor[i] * qhat; + borrow = (NBASE - 1 - tmp_result) / NBASE; + dividend_j[i] = tmp_result + borrow * NBASE; + } /* - * Adjust quotient digit if it's too large. Knuth proves that - * after this step, the quotient digit will be either correct or - * just one too large. (Note: it's OK to use dividend[j+2] here - * because we know the divisor length is at least 2.) + * If we got a borrow out of the top dividend digit, then indeed + * qhat was one too large. Fix it, and add back the divisor to + * correct the working dividend. (Knuth proves that this will + * occur only about 3/NBASE of the time; hence, it's a good idea + * to test this code with small NBASE to be sure this section gets + * exercised.) */ - while (divisor2 * qhat > - (next2digits - qhat * divisor1) * NBASE + dividend[j + 2]) - qhat--; - - /* As above, need do nothing more when quotient digit is 0 */ - if (qhat > 0) + if (borrow) { - NumericDigit *dividend_j = &dividend[j]; - - /* - * Multiply the divisor by qhat, and subtract that from the - * working dividend. 
The multiplication and subtraction are - * folded together here, noting that qhat <= NBASE (since it - * might be one too large), and so the intermediate result - * "tmp_result" is in the range [-NBASE^2, NBASE - 1], and - * "borrow" is in the range [0, NBASE]. - */ - borrow = 0; + qhat--; + carry = 0; for (i = var2ndigits; i >= 0; i--) { - int tmp_result; - - tmp_result = dividend_j[i] - borrow - divisor[i] * qhat; - borrow = (NBASE - 1 - tmp_result) / NBASE; - dividend_j[i] = tmp_result + borrow * NBASE; - } - - /* - * If we got a borrow out of the top dividend digit, then - * indeed qhat was one too large. Fix it, and add back the - * divisor to correct the working dividend. (Knuth proves - * that this will occur only about 3/NBASE of the time; hence, - * it's a good idea to test this code with small NBASE to be - * sure this section gets exercised.) - */ - if (borrow) - { - qhat--; - carry = 0; - for (i = var2ndigits; i >= 0; i--) + carry += dividend_j[i] + divisor[i]; + if (carry >= NBASE) { - carry += dividend_j[i] + divisor[i]; - if (carry >= NBASE) - { - dividend_j[i] = carry - NBASE; - carry = 1; - } - else - { - dividend_j[i] = carry; - carry = 0; - } + dividend_j[i] = carry - NBASE; + carry = 1; + } + else + { + dividend_j[i] = carry; + carry = 0; } - /* A carry should occur here to cancel the borrow above */ - Assert(carry == 1); } + /* A carry should occur here to cancel the borrow above */ + Assert(carry == 1); } - - /* And we're done with this quotient digit */ - res_digits[j] = qhat; } + /* And we're done with this quotient digit */ + res_digits[j] = qhat; + } + pfree(dividend); /* diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index 2c47dea3429..a0490a75224 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -1625,7 +1625,7 @@ pg_newlocale_from_collation(Oid collid) } datum = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion, - &isnull); + &isnull); if (!isnull) { char *actual_versionstr; @@ -1992,7 +1992,7 @@ check_icu_locale(const char *icu_locale) { #ifdef USE_ICU UCollator *collator; - UErrorCode status; + UErrorCode status; status = U_ZERO_ERROR; collator = ucol_open(icu_locale, &status); diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index d3ad795a6ea..893690dad52 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -2411,7 +2411,7 @@ pg_stat_have_stats(PG_FUNCTION_ARGS) char *stats_type = text_to_cstring(PG_GETARG_TEXT_P(0)); Oid dboid = PG_GETARG_OID(1); Oid objoid = PG_GETARG_OID(2); - PgStat_Kind kind = pgstat_get_kind_from_str(stats_type); + PgStat_Kind kind = pgstat_get_kind_from_str(stats_type); PG_RETURN_BOOL(pgstat_have_entry(kind, dboid, objoid)); } diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c index f90b0a3b358..1190b8000bc 100644 --- a/src/backend/utils/adt/rangetypes_spgist.c +++ b/src/backend/utils/adt/rangetypes_spgist.c @@ -608,8 +608,8 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS) /* * Non-empty range A contains non-empty range B if lower * bound of A is lower or equal to lower bound of range B - * and upper bound of range A is greater than or equal to upper - * bound of range A. + * and upper bound of range A is greater than or equal to + * upper bound of range A. * * All non-empty ranges contain an empty range. 
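Returning briefly to div_var(): the middle of the re-indented loop estimates each quotient digit from the two leading dividend digits and then corrects the estimate against the second divisor digit (Knuth's step D3). A self-contained sketch; the divisor 5000,4999 is picked so the first estimate really does come out one too large:

    #include <stdio.h>

    #define NBASE 10000

    /*
     * Estimate the quotient digit qhat from the first two dividend
     * digits, then decrement while the two-digit test proves it too
     * large.  With a normalized divisor the loop runs at most twice,
     * and afterwards qhat is correct or exactly one too large; the
     * borrow/add-back path seen in the hunk above absorbs that case.
     */
    static int
    estimate_qhat(const int *window, int divisor1, int divisor2)
    {
        int next2digits = window[0] * NBASE + window[1];
        int qhat;

        if (window[0] == divisor1)
            qhat = NBASE - 1;
        else
            qhat = next2digits / divisor1;

        while (divisor2 * qhat >
               (next2digits - qhat * divisor1) * NBASE + window[2])
            qhat--;

        return qhat;
    }

    int
    main(void)
    {
        int window[3] = {2500, 0, 0};   /* leading dividend digits */

        printf("qhat = %d\n", estimate_qhat(window, 5000, 4999));
        return 0;
    }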
*/ diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 01d4c22cfce..51b3fdc9a01 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -113,8 +113,10 @@ typedef struct RI_ConstraintInfo Oid fk_relid; /* referencing relation */ char confupdtype; /* foreign key's ON UPDATE action */ char confdeltype; /* foreign key's ON DELETE action */ - int ndelsetcols; /* number of columns referenced in ON DELETE SET clause */ - int16 confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on delete */ + int ndelsetcols; /* number of columns referenced in ON DELETE + * SET clause */ + int16 confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on + * delete */ char confmatchtype; /* foreign key's match type */ int nkeys; /* number of key columns */ int16 pk_attnums[RI_MAX_NUMKEYS]; /* attnums of referenced cols */ @@ -1059,7 +1061,8 @@ ri_set(TriggerData *trigdata, bool is_set_null, int tgkind) /* * Fetch or prepare a saved plan for the trigger. */ - switch (tgkind) { + switch (tgkind) + { case RI_TRIGTYPE_UPDATE: queryno = is_set_null ? RI_PLAN_SETNULL_ONUPDATE @@ -1086,25 +1089,29 @@ ri_set(TriggerData *trigdata, bool is_set_null, int tgkind) const char *qualsep; Oid queryoids[RI_MAX_NUMKEYS]; const char *fk_only; - int num_cols_to_set; + int num_cols_to_set; const int16 *set_cols; - switch (tgkind) { + switch (tgkind) + { case RI_TRIGTYPE_UPDATE: num_cols_to_set = riinfo->nkeys; set_cols = riinfo->fk_attnums; break; case RI_TRIGTYPE_DELETE: + /* - * If confdelsetcols are present, then we only update - * the columns specified in that array, otherwise we - * update all the referencing columns. + * If confdelsetcols are present, then we only update the + * columns specified in that array, otherwise we update all + * the referencing columns. */ - if (riinfo->ndelsetcols != 0) { + if (riinfo->ndelsetcols != 0) + { num_cols_to_set = riinfo->ndelsetcols; set_cols = riinfo->confdelsetcols; } - else { + else + { num_cols_to_set = riinfo->nkeys; set_cols = riinfo->fk_attnums; } diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 5d49f564a2e..f22ecfc5832 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -2331,7 +2331,10 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, if (string) appendStringInfo(&buf, " ON DELETE %s", string); - /* Add columns specified to SET NULL or SET DEFAULT if provided. */ + /* + * Add columns specified to SET NULL or SET DEFAULT if + * provided. + */ val = SysCacheGetAttr(CONSTROID, tup, Anum_pg_constraint_confdelsetcols, &isnull); if (!isnull) @@ -8260,7 +8263,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags) case T_GroupingFunc: /* own parentheses */ case T_WindowFunc: /* own parentheses */ case T_CaseExpr: /* other separators */ - case T_JsonExpr: /* own parentheses */ + case T_JsonExpr: /* own parentheses */ return true; default: return false; @@ -8456,8 +8459,8 @@ get_json_format(JsonFormat *format, StringInfo buf) if (format->encoding != JS_ENC_DEFAULT) { const char *encoding = - format->encoding == JS_ENC_UTF16 ? "UTF16" : - format->encoding == JS_ENC_UTF32 ? "UTF32" : "UTF8"; + format->encoding == JS_ENC_UTF16 ? "UTF16" : + format->encoding == JS_ENC_UTF32 ? 
"UTF32" : "UTF8"; appendStringInfo(buf, " ENCODING %s", encoding); } @@ -8479,7 +8482,7 @@ get_json_returning(JsonReturning *returning, StringInfo buf, if (!json_format_by_default || returning->format->format_type != - (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON)) + (returning->typid == JSONBOID ? JS_FORMAT_JSONB : JS_FORMAT_JSON)) get_json_format(returning->format, buf); } @@ -9778,7 +9781,8 @@ get_rule_expr(Node *node, deparse_context *context, if (jexpr->passing_values) { - ListCell *lc1, *lc2; + ListCell *lc1, + *lc2; bool needcomma = false; appendStringInfoString(buf, " PASSING "); @@ -10147,7 +10151,7 @@ get_json_constructor(JsonConstructorExpr *ctor, deparse_context *context, if (nargs > 0) { const char *sep = ctor->type == JSCTOR_JSON_OBJECT && - (nargs % 2) != 0 ? " : " : ", "; + (nargs % 2) != 0 ? " : " : ", "; appendStringInfoString(buf, sep); } @@ -10251,7 +10255,8 @@ get_agg_expr_helper(Aggref *aggref, deparse_context *context, if (is_json_objectagg) { if (i > 2) - break; /* skip ABSENT ON NULL and WITH UNIQUE args */ + break; /* skip ABSENT ON NULL and WITH UNIQUE + * args */ appendStringInfoString(buf, " : "); } @@ -11160,16 +11165,16 @@ get_json_table_nested_columns(TableFunc *tf, Node *node, } else { - JsonTableParent *n = castNode(JsonTableParent, node); + JsonTableParent *n = castNode(JsonTableParent, node); - if (needcomma) - appendStringInfoChar(context->buf, ','); + if (needcomma) + appendStringInfoChar(context->buf, ','); - appendStringInfoChar(context->buf, ' '); - appendContextKeyword(context, "NESTED PATH ", 0, 0, 0); - get_const_expr(n->path, context, -1); - appendStringInfo(context->buf, " AS %s", quote_identifier(n->name)); - get_json_table_columns(tf, n, context, showimplicit); + appendStringInfoChar(context->buf, ' '); + appendContextKeyword(context, "NESTED PATH ", 0, 0, 0); + get_const_expr(n->path, context, -1); + appendStringInfo(context->buf, " AS %s", quote_identifier(n->name)); + get_json_table_columns(tf, n, context, showimplicit); } } @@ -11199,17 +11204,17 @@ get_json_table_plan(TableFunc *tf, Node *node, deparse_context *context, } else { - JsonTableParent *n = castNode(JsonTableParent, node); + JsonTableParent *n = castNode(JsonTableParent, node); - appendStringInfoString(context->buf, quote_identifier(n->name)); + appendStringInfoString(context->buf, quote_identifier(n->name)); - if (n->child) - { + if (n->child) + { appendStringInfoString(context->buf, n->outerJoin ? " OUTER " : " INNER "); get_json_table_plan(tf, n->child, context, IsA(n->child, JsonTableSibling)); - } + } } if (parenthesize) @@ -11348,7 +11353,8 @@ get_json_table(TableFunc *tf, deparse_context *context, bool showimplicit) if (jexpr->passing_values) { - ListCell *lc1, *lc2; + ListCell *lc1, + *lc2; bool needcomma = false; appendStringInfoChar(buf, ' '); diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 71cbc1c3d80..fa1f589fad8 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -3380,9 +3380,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, */ double estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs, - double input_rows, - List **pgset, EstimationInfo *estinfo, - List **cache_varinfos, int prevNExprs) + double input_rows, + List **pgset, EstimationInfo *estinfo, + List **cache_varinfos, int prevNExprs) { List *varinfos = (cache_varinfos) ? 
*cache_varinfos : NIL; double srf_multiplier = 1.0; @@ -3433,7 +3433,7 @@ estimate_num_groups_incremental(PlannerInfo *root, List *groupExprs, if (cache_varinfos && j++ < prevNExprs) { if (pgset) - i++; /* to keep in sync with lines below */ + i++; /* to keep in sync with lines below */ continue; } @@ -3944,7 +3944,7 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel, Oid statOid = InvalidOid; MVNDistinct *stats; StatisticExtInfo *matched_info = NULL; - RangeTblEntry *rte; + RangeTblEntry *rte; /* bail out immediately if the table has no extended statistics */ if (!rel->statlist) @@ -5255,7 +5255,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, foreach(slist, onerel->statlist) { StatisticExtInfo *info = (StatisticExtInfo *) lfirst(slist); - RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root); + RangeTblEntry *rte = planner_rt_fetch(onerel->relid, root); ListCell *expr_item; int pos; diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index 8acb725bc8f..f70f829d830 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -2194,6 +2194,7 @@ timestamp_sortsupport(PG_FUNCTION_ARGS) SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0); #if SIZEOF_DATUM >= 8 + /* * If this build has pass-by-value timestamps, then we can use a standard * comparator function. @@ -4349,59 +4350,59 @@ interval_trunc(PG_FUNCTION_ARGS) if (type == UNITS) { interval2itm(*interval, tm); - switch (val) - { - case DTK_MILLENNIUM: - /* caution: C division may have negative remainder */ - tm->tm_year = (tm->tm_year / 1000) * 1000; - /* FALL THRU */ - case DTK_CENTURY: - /* caution: C division may have negative remainder */ - tm->tm_year = (tm->tm_year / 100) * 100; - /* FALL THRU */ - case DTK_DECADE: - /* caution: C division may have negative remainder */ - tm->tm_year = (tm->tm_year / 10) * 10; - /* FALL THRU */ - case DTK_YEAR: - tm->tm_mon = 0; - /* FALL THRU */ - case DTK_QUARTER: - tm->tm_mon = 3 * (tm->tm_mon / 3); - /* FALL THRU */ - case DTK_MONTH: - tm->tm_mday = 0; - /* FALL THRU */ - case DTK_DAY: - tm->tm_hour = 0; - /* FALL THRU */ - case DTK_HOUR: - tm->tm_min = 0; - /* FALL THRU */ - case DTK_MINUTE: - tm->tm_sec = 0; - /* FALL THRU */ - case DTK_SECOND: - tm->tm_usec = 0; - break; - case DTK_MILLISEC: - tm->tm_usec = (tm->tm_usec / 1000) * 1000; - break; - case DTK_MICROSEC: - break; - - default: - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unit \"%s\" not supported for type %s", - lowunits, format_type_be(INTERVALOID)), - (val == DTK_WEEK) ? 
errdetail("Months usually have fractional weeks.") : 0)); - } + switch (val) + { + case DTK_MILLENNIUM: + /* caution: C division may have negative remainder */ + tm->tm_year = (tm->tm_year / 1000) * 1000; + /* FALL THRU */ + case DTK_CENTURY: + /* caution: C division may have negative remainder */ + tm->tm_year = (tm->tm_year / 100) * 100; + /* FALL THRU */ + case DTK_DECADE: + /* caution: C division may have negative remainder */ + tm->tm_year = (tm->tm_year / 10) * 10; + /* FALL THRU */ + case DTK_YEAR: + tm->tm_mon = 0; + /* FALL THRU */ + case DTK_QUARTER: + tm->tm_mon = 3 * (tm->tm_mon / 3); + /* FALL THRU */ + case DTK_MONTH: + tm->tm_mday = 0; + /* FALL THRU */ + case DTK_DAY: + tm->tm_hour = 0; + /* FALL THRU */ + case DTK_HOUR: + tm->tm_min = 0; + /* FALL THRU */ + case DTK_MINUTE: + tm->tm_sec = 0; + /* FALL THRU */ + case DTK_SECOND: + tm->tm_usec = 0; + break; + case DTK_MILLISEC: + tm->tm_usec = (tm->tm_usec / 1000) * 1000; + break; + case DTK_MICROSEC: + break; - if (itm2interval(tm, result) != 0) + default: ereport(ERROR, - (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), - errmsg("interval out of range"))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unit \"%s\" not supported for type %s", + lowunits, format_type_be(INTERVALOID)), + (val == DTK_WEEK) ? errdetail("Months usually have fractional weeks.") : 0)); + } + + if (itm2interval(tm, result) != 0) + ereport(ERROR, + (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE), + errmsg("interval out of range"))); } else { @@ -5225,80 +5226,80 @@ interval_part_common(PG_FUNCTION_ARGS, bool retnumeric) if (type == UNITS) { interval2itm(*interval, tm); - switch (val) - { - case DTK_MICROSEC: - intresult = tm->tm_sec * INT64CONST(1000000) + tm->tm_usec; - break; + switch (val) + { + case DTK_MICROSEC: + intresult = tm->tm_sec * INT64CONST(1000000) + tm->tm_usec; + break; - case DTK_MILLISEC: - if (retnumeric) - /*--- - * tm->tm_sec * 1000 + fsec / 1000 - * = (tm->tm_sec * 1'000'000 + fsec) / 1000 - */ - PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 3)); - else - PG_RETURN_FLOAT8(tm->tm_sec * 1000.0 + tm->tm_usec / 1000.0); - break; + case DTK_MILLISEC: + if (retnumeric) + /*--- + * tm->tm_sec * 1000 + fsec / 1000 + * = (tm->tm_sec * 1'000'000 + fsec) / 1000 + */ + PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 3)); + else + PG_RETURN_FLOAT8(tm->tm_sec * 1000.0 + tm->tm_usec / 1000.0); + break; - case DTK_SECOND: - if (retnumeric) - /*--- - * tm->tm_sec + fsec / 1'000'000 - * = (tm->tm_sec * 1'000'000 + fsec) / 1'000'000 - */ - PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 6)); - else - PG_RETURN_FLOAT8(tm->tm_sec + tm->tm_usec / 1000000.0); - break; + case DTK_SECOND: + if (retnumeric) + /*--- + * tm->tm_sec + fsec / 1'000'000 + * = (tm->tm_sec * 1'000'000 + fsec) / 1'000'000 + */ + PG_RETURN_NUMERIC(int64_div_fast_to_numeric(tm->tm_sec * INT64CONST(1000000) + tm->tm_usec, 6)); + else + PG_RETURN_FLOAT8(tm->tm_sec + tm->tm_usec / 1000000.0); + break; - case DTK_MINUTE: - intresult = tm->tm_min; - break; + case DTK_MINUTE: + intresult = tm->tm_min; + break; - case DTK_HOUR: - intresult = tm->tm_hour; - break; + case DTK_HOUR: + intresult = tm->tm_hour; + break; - case DTK_DAY: - intresult = tm->tm_mday; - break; + case DTK_DAY: + intresult = tm->tm_mday; + break; - case DTK_MONTH: - intresult = tm->tm_mon; - break; + case DTK_MONTH: + intresult = tm->tm_mon; + break; - case DTK_QUARTER: - intresult 
= (tm->tm_mon / 3) + 1; - break; + case DTK_QUARTER: + intresult = (tm->tm_mon / 3) + 1; + break; - case DTK_YEAR: - intresult = tm->tm_year; - break; + case DTK_YEAR: + intresult = tm->tm_year; + break; - case DTK_DECADE: - /* caution: C division may have negative remainder */ - intresult = tm->tm_year / 10; - break; + case DTK_DECADE: + /* caution: C division may have negative remainder */ + intresult = tm->tm_year / 10; + break; - case DTK_CENTURY: - /* caution: C division may have negative remainder */ - intresult = tm->tm_year / 100; - break; + case DTK_CENTURY: + /* caution: C division may have negative remainder */ + intresult = tm->tm_year / 100; + break; - case DTK_MILLENNIUM: - /* caution: C division may have negative remainder */ - intresult = tm->tm_year / 1000; - break; + case DTK_MILLENNIUM: + /* caution: C division may have negative remainder */ + intresult = tm->tm_year / 1000; + break; - default: - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unit \"%s\" not supported for type %s", - lowunits, format_type_be(INTERVALOID)))); - intresult = 0; - } + default: + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("unit \"%s\" not supported for type %s", + lowunits, format_type_be(INTERVALOID)))); + intresult = 0; + } } else if (type == RESERV && val == DTK_EPOCH) { diff --git a/src/backend/utils/adt/uuid.c b/src/backend/utils/adt/uuid.c index a157f864e12..7cec9372485 100644 --- a/src/backend/utils/adt/uuid.c +++ b/src/backend/utils/adt/uuid.c @@ -377,8 +377,8 @@ uuid_abbrev_convert(Datum original, SortSupport ssup) * * This is needed so that ssup_datum_unsigned_cmp() (an unsigned integer * 3-way comparator) works correctly on all platforms. If we didn't do - * this, the comparator would have to call memcmp() with a pair of pointers - * to the first byte of each abbreviated key, which is slower. + * this, the comparator would have to call memcmp() with a pair of + * pointers to the first byte of each abbreviated key, which is slower. */ res = DatumBigEndianToNative(res); diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index 8b5b30ed714..bbeb0a2653a 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -744,7 +744,7 @@ bpchareq(PG_FUNCTION_ARGS) bool result; Oid collid = PG_GET_COLLATION(); bool locale_is_c = false; - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; check_collation_set(collid); @@ -789,7 +789,7 @@ bpcharne(PG_FUNCTION_ARGS) bool result; Oid collid = PG_GET_COLLATION(); bool locale_is_c = false; - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; check_collation_set(collid); diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index cfc135c7beb..919138eaf32 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -1758,7 +1758,7 @@ texteq(PG_FUNCTION_ARGS) { Oid collid = PG_GET_COLLATION(); bool locale_is_c = false; - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; bool result; check_collation_set(collid); @@ -1817,7 +1817,7 @@ textne(PG_FUNCTION_ARGS) { Oid collid = PG_GET_COLLATION(); bool locale_is_c = false; - pg_locale_t mylocale = 0; + pg_locale_t mylocale = 0; bool result; check_collation_set(collid); @@ -2674,8 +2674,8 @@ done: * * This is needed so that ssup_datum_unsigned_cmp() (an unsigned integer * 3-way comparator) works correctly on all platforms. 
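The uuid.c hunk above, and the varlena.c lines that follow, reflow the same explanation of why abbreviated keys are passed through DatumBigEndianToNative(): once the leading bytes sit in a native integer in big-endian byte order, one unsigned comparison reproduces memcmp() ordering. A standalone sketch of the idea; the GCC/Clang byte-swap builtin is an assumption of this example, not what the server uses:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Pack the first 8 bytes so integer order matches memcmp() order. */
    static uint64_t
    abbreviate(const unsigned char *bytes)
    {
        uint64_t v;

        memcpy(&v, bytes, sizeof(v));   /* bytes land in big-endian order */
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        v = __builtin_bswap64(v);       /* convert to native integer order */
    #endif
        return v;
    }

    int
    main(void)
    {
        const unsigned char a[8] = "abcdefgh";
        const unsigned char b[8] = "abcdefgi";

        printf("memcmp says a<b: %d, abbreviated compare says a<b: %d\n",
               memcmp(a, b, 8) < 0, abbreviate(a) < abbreviate(b));
        return 0;
    }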
If we didn't do - * this, the comparator would have to call memcmp() with a pair of pointers - * to the first byte of each abbreviated key, which is slower. + * this, the comparator would have to call memcmp() with a pair of + * pointers to the first byte of each abbreviated key, which is slower. */ res = DatumBigEndianToNative(res); diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index 4cf6db504ff..0d6a2956748 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -689,10 +689,10 @@ RevalidateCachedQuery(CachedPlanSource *plansource, queryEnv); else tlist = pg_analyze_and_rewrite_fixedparams(rawtree, - plansource->query_string, - plansource->param_types, - plansource->num_params, - queryEnv); + plansource->query_string, + plansource->param_types, + plansource->num_params, + queryEnv); /* Release snapshot if we got one */ if (snapshot_set) diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 43f14c233d6..60e72f9e8bf 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -5107,7 +5107,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind) Bitmapset *uindexattrs; /* columns in unique indexes */ Bitmapset *pkindexattrs; /* columns in the primary index */ Bitmapset *idindexattrs; /* columns in the replica identity */ - Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */ + Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */ List *indexoidlist; List *newindexoidlist; Oid relpkindex; @@ -5237,7 +5237,7 @@ restart: { if (indexDesc->rd_indam->amhotblocking) hotblockingattrs = bms_add_member(hotblockingattrs, - attrnum - FirstLowInvalidHeapAttributeNumber); + attrnum - FirstLowInvalidHeapAttributeNumber); if (isKey && i < indexDesc->rd_index->indnkeyatts) uindexattrs = bms_add_member(uindexattrs, @@ -5258,9 +5258,9 @@ restart: pull_varattnos(indexExpressions, 1, &hotblockingattrs); /* - * Collect all attributes in the index predicate, too. We have to ignore - * amhotblocking flag, because the row might become indexable, in which - * case we have to add it to the index. + * Collect all attributes in the index predicate, too. We have to + * ignore amhotblocking flag, because the row might become indexable, + * in which case we have to add it to the index. */ pull_varattnos(indexPredicate, 1, &hotblockingattrs); @@ -5308,9 +5308,8 @@ restart: /* * Now save copies of the bitmaps in the relcache entry. We intentionally * set rd_attrsvalid last, because that's what signals validity of the - * values; if we run out of memory before making that copy, we won't - * leave the relcache entry looking like the other ones are valid but - * empty. + * values; if we run out of memory before making that copy, we won't leave + * the relcache entry looking like the other ones are valid but empty. */ oldcxt = MemoryContextSwitchTo(CacheMemoryContext); relation->rd_keyattr = bms_copy(uindexattrs); @@ -5636,8 +5635,8 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc) pubdesc->pubactions.pubtruncate |= pubform->pubtruncate; /* - * Check if all columns referenced in the filter expression are part of - * the REPLICA IDENTITY index or not. + * Check if all columns referenced in the filter expression are part + * of the REPLICA IDENTITY index or not. * * If the publication is FOR ALL TABLES then it means the table has no * row filters and we can skip the validation. 
@@ -5645,7 +5644,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc) if (!pubform->puballtables && (pubform->pubupdate || pubform->pubdelete) && pub_rf_contains_invalid_column(pubid, relation, ancestors, - pubform->pubviaroot)) + pubform->pubviaroot)) { if (pubform->pubupdate) pubdesc->rf_valid_for_update = false; @@ -5662,7 +5661,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc) if (!pubform->puballtables && (pubform->pubupdate || pubform->pubdelete) && pub_collist_contains_invalid_column(pubid, relation, ancestors, - pubform->pubviaroot)) + pubform->pubviaroot)) { if (pubform->pubupdate) pubdesc->cols_valid_for_update = false; diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index 75a3aedc5af..2a330cf3ba4 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -287,7 +287,7 @@ RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId) void RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath, char *dstdbpath) { - RelMapFile map; + RelMapFile map; /* * Read the relmap file from the source database. @@ -302,8 +302,8 @@ RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath, char *dstdbpath) * RelationMappingLock. * * There's no point in trying to preserve files here. The new database - * isn't usable yet anyway, and won't ever be if we can't install a - * relmap file. + * isn't usable yet anyway, and won't ever be if we can't install a relmap + * file. */ write_relmap_file(&map, true, false, false, dbid, tsid, dstdbpath); } @@ -1089,11 +1089,11 @@ relmap_redo(XLogReaderState *record) * There shouldn't be anyone else updating relmaps during WAL replay, * but grab the lock to interlock against load_relmap_file(). * - * Note that we use the same WAL record for updating the relmap of - * an existing database as we do for creating a new database. In - * the latter case, taking the relmap log and sending sinval messages - * is unnecessary, but harmless. If we wanted to avoid it, we could - * add a flag to the WAL record to indicate which operation is being + * Note that we use the same WAL record for updating the relmap of an + * existing database as we do for creating a new database. In the + * latter case, taking the relmap log and sending sinval messages is + * unnecessary, but harmless. If we wanted to avoid it, we could add a + * flag to the WAL record to indicate which operation is being * performed. */ LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE); diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index d297ba08295..fa701daa26f 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -429,6 +429,7 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect iculocale = NULL; default_locale.provider = dbform->datlocprovider; + /* * Default locale is currently always deterministic. Nondeterministic * locales currently don't support pattern matching, which would break a @@ -604,8 +605,8 @@ BaseInit(void) InitTemporaryFileAccess(); /* - * Initialize local buffers for WAL record construction, in case we - * ever try to insert XLOG. + * Initialize local buffers for WAL record construction, in case we ever + * try to insert XLOG. 
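The relmapper comment above floats an alternative it deliberately does not implement: a flag in the WAL record saying whether the relmap write belongs to database creation or to an update, which would let redo skip the unnecessary (though harmless) sinval traffic for a brand-new database. Sketched below as a purely hypothetical record layout; the real xl_relmap_update record carries no such flag:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef enum
    {
        RELMAP_OP_UPDATE,   /* overwrite the relmap of an existing database */
        RELMAP_OP_CREATE    /* install the relmap of a database being created */
    } relmap_op;

    /* Hypothetical WAL record body, not actual PostgreSQL format. */
    typedef struct
    {
        uint32_t dbid;
        uint32_t tsid;
        relmap_op op;
    } xl_relmap_update_sketch;

    static bool
    redo_needs_sinval(const xl_relmap_update_sketch *rec)
    {
        /* nobody can have a brand-new database's relmap cached yet */
        return rec->op == RELMAP_OP_UPDATE;
    }

    int
    main(void)
    {
        xl_relmap_update_sketch rec = {5, 1663, RELMAP_OP_CREATE};

        printf("sinval needed: %s\n", redo_needs_sinval(&rec) ? "yes" : "no");
        return 0;
    }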
*/ InitXLogInsert(); @@ -693,10 +694,10 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username, } /* - * If this is either a bootstrap process or a standalone backend, start - * up the XLOG machinery, and register to have it closed down at exit. - * In other cases, the startup process is responsible for starting up - * the XLOG machinery, and the checkpointer for closing it down. + * If this is either a bootstrap process or a standalone backend, start up + * the XLOG machinery, and register to have it closed down at exit. In + * other cases, the startup process is responsible for starting up the + * XLOG machinery, and the checkpointer for closing it down. */ if (!IsUnderPostmaster) { @@ -1241,7 +1242,8 @@ ShutdownPostgres(int code, Datum arg) */ #ifdef USE_ASSERT_CHECKING { - int held_lwlocks = LWLockHeldCount(); + int held_lwlocks = LWLockHeldCount(); + if (held_lwlocks) elog(WARNING, "holding %d lwlocks at the end of ShutdownPostgres()", held_lwlocks); diff --git a/src/backend/utils/misc/queryjumble.c b/src/backend/utils/misc/queryjumble.c index 2ffa014618f..d35027275f1 100644 --- a/src/backend/utils/misc/queryjumble.c +++ b/src/backend/utils/misc/queryjumble.c @@ -787,7 +787,7 @@ JumbleExpr(JumbleState *jstate, Node *node) break; case T_JsonExpr: { - JsonExpr *jexpr = (JsonExpr *) node; + JsonExpr *jexpr = (JsonExpr *) node; APP_JUMB(jexpr->op); JumbleExpr(jstate, jexpr->formatted_expr); diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index a4c3b736678..8340a660526 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -469,7 +469,7 @@ struct Tuplesortstate /* These are specific to the index_btree subcase: */ bool enforceUnique; /* complain if we find duplicate tuples */ - bool uniqueNullsNotDistinct; /* unique constraint null treatment */ + bool uniqueNullsNotDistinct; /* unique constraint null treatment */ /* These are specific to the index_hash subcase: */ uint32 high_mask; /* masks for sortable part of hash code */ @@ -706,8 +706,8 @@ qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state) return compare; /* - * No need to waste effort calling the tiebreak function when there are - * no other keys to sort on. + * No need to waste effort calling the tiebreak function when there are no + * other keys to sort on. */ if (state->onlyKey != NULL) return 0; @@ -730,8 +730,8 @@ qsort_tuple_signed_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state) return compare; /* - * No need to waste effort calling the tiebreak function when there are - * no other keys to sort on. + * No need to waste effort calling the tiebreak function when there are no + * other keys to sort on. */ if (state->onlyKey != NULL) return 0; @@ -747,15 +747,15 @@ qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state) int compare; compare = ApplyInt32SortComparator(a->datum1, a->isnull1, - b->datum1, b->isnull1, - &state->sortKeys[0]); + b->datum1, b->isnull1, + &state->sortKeys[0]); if (compare != 0) return compare; /* - * No need to waste effort calling the tiebreak function when there are - * no other keys to sort on. + * No need to waste effort calling the tiebreak function when there are no + * other keys to sort on. */ if (state->onlyKey != NULL) return 0;
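Finally, the three tuplesort.c comparators reflowed above share one shape that is clearer without the surrounding machinery: compare the cheap leading datum, and call the expensive tiebreak only when that comparison ties and more sort keys remain. A simplified sketch with plain ints standing in for Datums:

    #include <stdio.h>

    typedef struct
    {
        int datum1;          /* cheap leading sort key */
        const char *tuple;   /* stand-in for the full tuple */
    } sort_tuple;

    static int
    full_tiebreak(const sort_tuple *a, const sort_tuple *b)
    {
        /* pretend this is the costly full-key comparison */
        return (a->tuple[0] > b->tuple[0]) - (a->tuple[0] < b->tuple[0]);
    }

    static int
    tuple_compare(const sort_tuple *a, const sort_tuple *b, int nkeys)
    {
        int compare = (a->datum1 > b->datum1) - (a->datum1 < b->datum1);

        if (compare != 0)
            return compare;

        /* no other keys to sort on: don't waste effort on the tiebreak */
        if (nkeys == 1)
            return 0;

        return full_tiebreak(a, b);
    }

    int
    main(void)
    {
        sort_tuple x = {42, "apple"};
        sort_tuple y = {42, "pear"};

        printf("%d\n", tuple_compare(&x, &y, 2));   /* -1, via the tiebreak */
        return 0;
    }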