author     Michael Paquier    2025-04-19 10:17:42 +0000
committer  Michael Paquier    2025-04-19 10:17:42 +0000
commit     88e947136b47664b6936b35542f2d1eda0c90588 (patch)
tree       6a5999ed2088fb1455940632b70a8d452ec56347
parent     114f7fa81c72637d75b574269f2076dcc1104e24 (diff)
Fix typos and grammar in the code
The large majority of these have been introduced by recent commits done
in the v18 development cycle.

Author: Alexander Lakhin <[email protected]>
Discussion: https://postgr.es/m/[email protected]
-rw-r--r--  contrib/amcheck/verify_common.h | 2
-rw-r--r--  contrib/amcheck/verify_gin.c | 6
-rw-r--r--  contrib/pg_buffercache/pg_buffercache_pages.c | 4
-rw-r--r--  contrib/pg_overexplain/expected/pg_overexplain.out | 8
-rw-r--r--  contrib/pg_overexplain/pg_overexplain.c | 10
-rw-r--r--  contrib/postgres_fdw/t/001_auth_scram.pl | 2
-rw-r--r--  doc/src/sgml/ref/pg_dump.sgml | 2
-rw-r--r--  doc/src/sgml/regress.sgml | 2
-rw-r--r--  src/backend/access/gin/gininsert.c | 10
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 2
-rw-r--r--  src/backend/access/transam/xlog.c | 6
-rw-r--r--  src/backend/catalog/catalog.c | 2
-rw-r--r--  src/backend/commands/tablecmds.c | 2
-rw-r--r--  src/backend/executor/execMain.c | 2
-rw-r--r--  src/backend/executor/execPartition.c | 4
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 2
-rw-r--r--  src/backend/executor/nodeSeqscan.c | 2
-rw-r--r--  src/backend/nodes/queryjumblefuncs.c | 2
-rw-r--r--  src/backend/postmaster/postmaster.c | 6
-rw-r--r--  src/backend/storage/aio/README.md | 2
-rw-r--r--  src/backend/storage/aio/method_worker.c | 2
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 2
-rw-r--r--  src/backend/storage/page/bufpage.c | 2
-rw-r--r--  src/backend/utils/adt/mcxtfuncs.c | 6
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 2
-rw-r--r--  src/backend/utils/cache/plancache.c | 2
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 2
-rw-r--r--  src/bin/pg_dump/t/006_pg_dumpall.pl | 2
-rw-r--r--  src/bin/pg_upgrade/relfilenumber.c | 2
-rw-r--r--  src/bin/pg_upgrade/t/002_pg_upgrade.pl | 2
-rw-r--r--  src/bin/psql/common.c | 2
-rw-r--r--  src/bin/scripts/t/100_vacuumdb.pl | 2
-rw-r--r--  src/include/access/gin_tuple.h | 4
-rw-r--r--  src/include/nodes/pathnodes.h | 2
-rw-r--r--  src/include/storage/aio_subsys.h | 2
-rw-r--r--  src/interfaces/libpq/fe-auth-oauth-curl.c | 2
-rw-r--r--  src/interfaces/libpq/fe-cancel.c | 2
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 2
-rw-r--r--  src/interfaces/libpq/fe-protocol3.c | 2
-rw-r--r--  src/port/pg_localeconv_r.c | 4
-rw-r--r--  src/port/pg_popcount_aarch64.c | 2
-rw-r--r--  src/test/modules/oauth_validator/t/002_client.pl | 3
-rw-r--r--  src/test/modules/test_aio/t/001_aio.pl | 2
-rw-r--r--  src/test/recovery/t/035_standby_logical_decoding.pl | 2
-rw-r--r--  src/test/subscription/t/007_ddl.pl | 2
45 files changed, 68 insertions, 69 deletions
diff --git a/contrib/amcheck/verify_common.h b/contrib/amcheck/verify_common.h
index b2565bfbbab..e78adb68808 100644
--- a/contrib/amcheck/verify_common.h
+++ b/contrib/amcheck/verify_common.h
@@ -16,7 +16,7 @@
#include "utils/relcache.h"
#include "miscadmin.h"
-/* Typedefs for callback functions for amcheck_lock_relation */
+/* Typedefs for callback functions for amcheck_lock_relation_and_check */
typedef void (*IndexCheckableCallback) (Relation index);
typedef void (*IndexDoCheckCallback) (Relation rel,
Relation heaprel,
diff --git a/contrib/amcheck/verify_gin.c b/contrib/amcheck/verify_gin.c
index 318fe330518..b5f363562e3 100644
--- a/contrib/amcheck/verify_gin.c
+++ b/contrib/amcheck/verify_gin.c
@@ -359,8 +359,8 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting
ptr->depth = stack->depth + 1;
/*
- * Set rightmost parent key to invalid iterm pointer. Its
- * value is 'Infinity' and not explicitly stored.
+ * Set rightmost parent key to invalid item pointer. Its value
+ * is 'Infinity' and not explicitly stored.
*/
if (rightlink == InvalidBlockNumber)
ItemPointerSetInvalid(&ptr->parentkey);
@@ -587,7 +587,7 @@ gin_check_parent_keys_consistency(Relation rel,
/*
* Check if it is properly adjusted. If succeed,
- * procced to the next key.
+ * proceed to the next key.
*/
if (ginCompareEntries(&state, attnum, current_key,
current_key_category, parent_key,
diff --git a/contrib/pg_buffercache/pg_buffercache_pages.c b/contrib/pg_buffercache/pg_buffercache_pages.c
index e1701bd56ef..6bfb9fb669e 100644
--- a/contrib/pg_buffercache/pg_buffercache_pages.c
+++ b/contrib/pg_buffercache/pg_buffercache_pages.c
@@ -289,7 +289,7 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
*
* Returns NUMA node ID for each memory page used by the buffer. Buffers may
* be smaller or larger than OS memory pages. For each buffer we return one
- * entry for each memory page used by the buffer (it fhe buffer is smaller,
+ * entry for each memory page used by the buffer (if the buffer is smaller,
* it only uses a part of one memory page).
*
* We expect both sizes (for buffers and memory pages) to be a power-of-2, so
@@ -335,7 +335,7 @@ pg_buffercache_numa_pages(PG_FUNCTION_ARGS)
* how the pages and buffers "align" in memory - the buffers may be
* shifted in some way, using more memory pages than necessary.
*
- * So we need to be careful about mappping buffers to memory pages. We
+ * So we need to be careful about mapping buffers to memory pages. We
* calculate the maximum number of pages a buffer might use, so that
* we allocate enough space for the entries. And then we count the
* actual number of entries as we scan the buffers.
diff --git a/contrib/pg_overexplain/expected/pg_overexplain.out b/contrib/pg_overexplain/expected/pg_overexplain.out
index 28252dbff6c..44120c388af 100644
--- a/contrib/pg_overexplain/expected/pg_overexplain.out
+++ b/contrib/pg_overexplain/expected/pg_overexplain.out
@@ -123,7 +123,7 @@ $$);
RTI 1 (relation, inherited, in-from-clause):
Eref: vegetables (id, name, genus)
Relation: vegetables
- Relation Kind: parititioned_table
+ Relation Kind: partitioned_table
Relation Lock Mode: AccessShareLock
Permission Info Index: 1
RTI 2 (group):
@@ -250,7 +250,7 @@ $$);
<In-From-Clause>true</In-From-Clause> +
<Eref>vegetables (id, name, genus)</Eref> +
<Relation>vegetables</Relation> +
- <Relation-Kind>parititioned_table</Relation-Kind> +
+ <Relation-Kind>partitioned_table</Relation-Kind> +
<Relation-Lock-Mode>AccessShareLock</Relation-Lock-Mode> +
<Permission-Info-Index>1</Permission-Info-Index> +
<Security-Barrier>false</Security-Barrier> +
@@ -454,7 +454,7 @@ SELECT * FROM vegetables WHERE genus = 'daucus';
RTI 1 (relation, inherited, in-from-clause):
Eref: vegetables (id, name, genus)
Relation: vegetables
- Relation Kind: parititioned_table
+ Relation Kind: partitioned_table
Relation Lock Mode: AccessShareLock
Permission Info Index: 1
RTI 2 (relation, in-from-clause):
@@ -478,7 +478,7 @@ INSERT INTO vegetables (name, genus) VALUES ('broccoflower', 'brassica');
RTI 1 (relation):
Eref: vegetables (id, name, genus)
Relation: vegetables
- Relation Kind: parititioned_table
+ Relation Kind: partitioned_table
Relation Lock Mode: RowExclusiveLock
Permission Info Index: 1
RTI 2 (result):
diff --git a/contrib/pg_overexplain/pg_overexplain.c b/contrib/pg_overexplain/pg_overexplain.c
index f60049f4ba6..de824566f8c 100644
--- a/contrib/pg_overexplain/pg_overexplain.c
+++ b/contrib/pg_overexplain/pg_overexplain.c
@@ -277,7 +277,7 @@ overexplain_per_plan_hook(PlannedStmt *plannedstmt,
* Print out various details from the PlannedStmt that wouldn't otherwise
* be displayed.
*
- * We don't try to print everything here. Information that would be displyed
+ * We don't try to print everything here. Information that would be displayed
* anyway doesn't need to be printed again here, and things with lots of
* substructure probably should be printed via separate options, or not at all.
*/
@@ -517,10 +517,10 @@ overexplain_range_table(PlannedStmt *plannedstmt, ExplainState *es)
relkind = "foreign_table";
break;
case RELKIND_PARTITIONED_TABLE:
- relkind = "parititioned_table";
+ relkind = "partitioned_table";
break;
case RELKIND_PARTITIONED_INDEX:
- relkind = "parititioned_index";
+ relkind = "partitioned_index";
break;
case '\0':
relkind = NULL;
@@ -632,7 +632,7 @@ overexplain_range_table(PlannedStmt *plannedstmt, ExplainState *es)
}
/*
- * add_rte_to_flat_rtable will clear coltypes, coltypemods, and
+ * add_rte_to_flat_rtable will clear coltypes, coltypmods, and
* colcollations, so skip those fields.
*
* If this is an ephemeral named relation, print out ENR-related
@@ -675,7 +675,7 @@ overexplain_range_table(PlannedStmt *plannedstmt, ExplainState *es)
* Emit a text property describing the contents of an Alias.
*
* Column lists can be quite long here, so perhaps we should have an option
- * to limit the display length by # of columsn or # of characters, but for
+ * to limit the display length by # of columns or # of characters, but for
* now, just display everything.
*/
static void
diff --git a/contrib/postgres_fdw/t/001_auth_scram.pl b/contrib/postgres_fdw/t/001_auth_scram.pl
index ba178246eb7..b94a6a6293b 100644
--- a/contrib/postgres_fdw/t/001_auth_scram.pl
+++ b/contrib/postgres_fdw/t/001_auth_scram.pl
@@ -3,7 +3,7 @@
# Test SCRAM authentication when opening a new connection with a foreign
# server.
#
-# The test is executed by testing the SCRAM authentifcation on a looplback
+# The test is executed by testing the SCRAM authentifcation on a loopback
# connection on the same server and with different servers.
use strict;
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index bfc1e7b3524..b757d27ebd0 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -1315,7 +1315,7 @@ PostgreSQL documentation
</para>
<para>
The data section contains actual table data, large-object
- contents, statitistics for tables and materialized views and
+ contents, statistics for tables and materialized views and
sequence values.
Post-data items include definitions of indexes, triggers, rules,
statistics for indexes, and constraints other than validated check
diff --git a/doc/src/sgml/regress.sgml b/doc/src/sgml/regress.sgml
index 0e5e8e8f309..bf4ffb30576 100644
--- a/doc/src/sgml/regress.sgml
+++ b/doc/src/sgml/regress.sgml
@@ -353,7 +353,7 @@ make check-world PG_TEST_EXTRA='kerberos ldap ssl load_balance libpq_encryption'
<listitem>
<para>
Runs the test suite under <filename>src/test/modules/oauth_validator</filename>.
- This opens TCP/IP listen sockets for a test-server running HTTPS.
+ This opens TCP/IP listen sockets for a test server running HTTPS.
</para>
</listitem>
</varlistentry>
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index cfab93ec30c..a7b7b5996e3 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -167,7 +167,7 @@ typedef struct
/*
* The sortstate used only within a single worker for the first merge pass
- * happenning there. In principle it doesn't need to be part of the build
+ * happening there. In principle it doesn't need to be part of the build
* state and we could pass it around directly, but it's more convenient
* this way. And it's part of the build state, after all.
*/
@@ -1306,7 +1306,7 @@ GinBufferIsEmpty(GinBuffer *buffer)
* Compare if the tuple matches the already accumulated data in the GIN
* buffer. Compare scalar fields first, before the actual key.
*
- * Returns true if the key matches, and the TID belonds to the buffer, or
+ * Returns true if the key matches, and the TID belongs to the buffer, or
* false if the key does not match.
*/
static bool
@@ -1497,7 +1497,7 @@ GinBufferStoreTuple(GinBuffer *buffer, GinTuple *tup)
buffer->items = repalloc(buffer->items,
(buffer->nitems + tup->nitems) * sizeof(ItemPointerData));
- new = ginMergeItemPointers(&buffer->items[buffer->nfrozen], /* first unfronzen */
+ new = ginMergeItemPointers(&buffer->items[buffer->nfrozen], /* first unfrozen */
(buffer->nitems - buffer->nfrozen), /* num of unfrozen */
items, tup->nitems, &nnew);
@@ -1531,7 +1531,7 @@ GinBufferReset(GinBuffer *buffer)
pfree(DatumGetPointer(buffer->key));
/*
- * Not required, but makes it more likely to trigger NULL derefefence if
+ * Not required, but makes it more likely to trigger NULL dereference if
* using the value incorrectly, etc.
*/
buffer->key = (Datum) 0;
@@ -1603,7 +1603,7 @@ GinBufferCanAddKey(GinBuffer *buffer, GinTuple *tup)
*
* After waiting for all workers to finish, merge the per-worker results into
* the complete index. The results from each worker are sorted by block number
- * (start of the page range). While combinig the per-worker results we merge
+ * (start of the page range). While combining the per-worker results we merge
* summaries for the same page range, and also fill-in empty summaries for
* ranges without any tuples.
*
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index f69397623df..77264ddeecb 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -1792,7 +1792,7 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
truncatt = BTreeTupleGetNAtts(itup, rel);
pstate.forcenonrequired = false;
- pstate.startikey = 0; /* _bt_set_startikey ignores HIKEY */
+ pstate.startikey = 0; /* _bt_set_startikey ignores P_HIKEY */
_bt_checkkeys(scan, &pstate, arrayKeys, itup, truncatt);
}
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index ec40c0b7c42..2d4c346473b 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -473,7 +473,7 @@ typedef struct XLogCtlData
XLogRecPtr InitializedFrom;
/*
- * Latest reserved for inititalization page in the cache (last byte
+ * Latest reserved for initialization page in the cache (last byte
* position + 1).
*
* To change the identity of a buffer, you need to advance
@@ -2221,7 +2221,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
* m must observe f[k] == false. Otherwise, it will later attempt
* CAS(v, k, k + 1) with success.
* 4. Therefore, corresponding read_barrier() (while j == k) on
- * process m happend before write_barrier() of process k. But then
+ * process m reached before write_barrier() of process k. But then
* process k attempts CAS(v, k, k + 1) after process m successfully
* incremented v to k, and that CAS operation must succeed.
* That leads to a contradiction. So, there is no such k (k < n)
@@ -2253,7 +2253,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
if (pg_atomic_read_u64(&XLogCtl->xlblocks[nextidx]) != NewPageEndPtr)
{
/*
- * Page at nextidx wasn't initialized yet, so we cann't move
+ * Page at nextidx wasn't initialized yet, so we can't move
* InitializedUpto further. It will be moved by backend which
* will initialize nextidx.
*/
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 35ebb0ccda4..60000bd0bc7 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -143,7 +143,7 @@ IsCatalogRelationOid(Oid relid)
*
* The relcache must not use these indexes. Inserting into any UNIQUE
* index compares index keys while holding BUFFER_LOCK_EXCLUSIVE.
- * bttextcmp() can search the COLLID catcache. Depending on concurrent
+ * bttextcmp() can search the COLLOID catcache. Depending on concurrent
* invalidation traffic, catcache can reach relcache builds. A backend
* would self-deadlock on LWLocks if the relcache build read the
* exclusive-locked buffer.
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 80f689bbbc5..265b1c397fb 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -11999,7 +11999,7 @@ DropForeignKeyConstraintTriggers(Relation trigrel, Oid conoid, Oid confrelid,
if (OidIsValid(confrelid) && trgform->tgrelid != confrelid)
continue;
- /* We should be droping trigger related to foreign key constraint */
+ /* We should be dropping trigger related to foreign key constraint */
Assert(trgform->tgfoid == F_RI_FKEY_CHECK_INS ||
trgform->tgfoid == F_RI_FKEY_CHECK_UPD ||
trgform->tgfoid == F_RI_FKEY_CASCADE_DEL ||
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 2da848970be..7230f968101 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1861,7 +1861,7 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
MemoryContext oldContext;
/*
- * CheckConstraintFetch let this pass with only a warning, but now we
+ * CheckNNConstraintFetch let this pass with only a warning, but now we
* should fail rather than possibly failing to enforce an important
* constraint.
*/
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index 5a77c253826..9435cc21fe7 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -1778,7 +1778,7 @@ adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
* Updates the PartitionPruneState found at given part_prune_index in
* EState.es_part_prune_states for use during "exec" pruning if required.
* Also returns the set of subplans to initialize that would be stored at
- * part_prune_index in EState.es_part_prune_result by
+ * part_prune_index in EState.es_part_prune_results by
* ExecDoInitialPruning(). Maps in PartitionPruneState are updated to
* account for initial pruning possibly having eliminated some of the
* subplans.
@@ -2109,7 +2109,7 @@ CreatePartitionPruneState(EState *estate, PartitionPruneInfo *pruneinfo,
*/
partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex, false);
- /* Remember for InitExecPartitionPruneContext(). */
+ /* Remember for InitExecPartitionPruneContexts(). */
pprune->partrel = partrel;
partkey = RelationGetPartitionKey(partrel);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 333cbf78343..46d533b7288 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1311,7 +1311,7 @@ ExecInsert(ModifyTableContext *context,
/*
* Convert the OLD tuple to the new partition's format/slot, if
- * needed. Note that ExceDelete() already converted it to the
+ * needed. Note that ExecDelete() already converted it to the
* root's partition's format/slot.
*/
oldSlot = context->cpDeletedSlot;
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 6f9e991eeae..ed35c58c2c3 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -100,7 +100,7 @@ SeqRecheck(SeqScanState *node, TupleTableSlot *slot)
* ExecSeqScan(node)
*
* Scans the relation sequentially and returns the next qualifying
- * tuple. This variant is used when there is no es_eqp_active, no qual
+ * tuple. This variant is used when there is no es_epq_active, no qual
* and no projection. Passing const-NULLs for these to ExecScanExtended
* allows the compiler to eliminate the additional code that would
* ordinarily be required for the evaluation of these.
diff --git a/src/backend/nodes/queryjumblefuncs.c b/src/backend/nodes/queryjumblefuncs.c
index 27fb87d3aaa..d1e82a63f09 100644
--- a/src/backend/nodes/queryjumblefuncs.c
+++ b/src/backend/nodes/queryjumblefuncs.c
@@ -357,7 +357,7 @@ AppendJumble64(JumbleState *jstate, const unsigned char *value)
/*
* FlushPendingNulls
- * Incorporate the pending_null value into the jumble buffer.
+ * Incorporate the pending_nulls value into the jumble buffer.
*
* Note: Callers must ensure that there's at least 1 pending NULL.
*/
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 17fed96fe20..490f7ce3664 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2718,7 +2718,7 @@ HandleFatalError(QuitSignalReason reason, bool consider_sigabrt)
/*
* Choose the appropriate new state to react to the fatal error. Unless we
* were already in the process of shutting down, we go through
- * PM_WAIT_BACKEND. For errors during the shutdown sequence, we directly
+ * PM_WAIT_BACKENDS. For errors during the shutdown sequence, we directly
* switch to PM_WAIT_DEAD_END.
*/
switch (pmState)
@@ -3001,7 +3001,7 @@ PostmasterStateMachine(void)
/*
* Stop any dead-end children and stop creating new ones.
*
- * NB: Similar code exists in HandleFatalErrors(), when the
+ * NB: Similar code exists in HandleFatalError(), when the
* error happens in pmState > PM_WAIT_BACKENDS.
*/
UpdatePMState(PM_WAIT_DEAD_END);
@@ -3082,7 +3082,7 @@ PostmasterStateMachine(void)
{
/*
* PM_WAIT_IO_WORKERS state ends when there's only checkpointer and
- * dead_end children left.
+ * dead-end children left.
*/
if (io_worker_count == 0)
{
diff --git a/src/backend/storage/aio/README.md b/src/backend/storage/aio/README.md
index b00de269ad9..f10b5c7e31e 100644
--- a/src/backend/storage/aio/README.md
+++ b/src/backend/storage/aio/README.md
@@ -103,7 +103,7 @@ pgaio_io_set_handle_data_32(ioh, (uint32 *) buffer, 1);
*
* E.g. md.c needs to translate block numbers into offsets in segments.
*
- * Once the IO handle has been handed off to smgstartreadv(), it may not
+ * Once the IO handle has been handed off to smgrstartreadv(), it may not
* further be used, as the IO may immediately get executed below
* smgrstartreadv() and the handle reused for another IO.
*
diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c
index 0fde2a5b30d..6e8b1327946 100644
--- a/src/backend/storage/aio/method_worker.c
+++ b/src/backend/storage/aio/method_worker.c
@@ -321,7 +321,7 @@ pgaio_worker_die(int code, Datum arg)
}
/*
- * Register the worker in shared memory, assign MyWorkerId and register a
+ * Register the worker in shared memory, assign MyIoWorkerId and register a
* shutdown callback to release registration.
*/
static void
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 1f2a9fe9976..fe0ceeadc13 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -4970,7 +4970,7 @@ FlushRelationBuffers(Relation rel)
ResourceOwnerEnlarge(CurrentResourceOwner);
/*
- * Pin/upin mostly to make valgrind work, but it also seems
+ * Pin/unpin mostly to make valgrind work, but it also seems
* like the right thing to do.
*/
PinLocalBuffer(bufHdr, false);
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 82457bacc62..dbb49ed9197 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -88,7 +88,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
* To allow the caller to report statistics about checksum failures,
* *checksum_failure_p can be passed in. Note that there may be checksum
* failures even if this function returns true, due to
- * IGNORE_CHECKSUM_FAILURE.
+ * PIV_IGNORE_CHECKSUM_FAILURE.
*/
bool
PageIsVerified(PageData *page, BlockNumber blkno, int flags, bool *checksum_failure_p)
diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c
index 254cdd34fba..206b601a52b 100644
--- a/src/backend/utils/adt/mcxtfuncs.c
+++ b/src/backend/utils/adt/mcxtfuncs.c
@@ -323,8 +323,8 @@ pg_log_backend_memory_contexts(PG_FUNCTION_ARGS)
* Signal a backend or an auxiliary process to send its memory contexts,
* wait for the results and display them.
*
- * By default, only superusers or users with PG_READ_ALL_STATS are allowed to
- * signal a process to return the memory contexts. This is because allowing
+ * By default, only superusers or users with ROLE_PG_READ_ALL_STATS are allowed
+ * to signal a process to return the memory contexts. This is because allowing
* any users to issue this request at an unbounded rate would cause lots of
* requests to be sent, which can lead to denial of service. Additional roles
* can be permitted with GRANT.
@@ -495,7 +495,7 @@ pg_get_process_memory_contexts(PG_FUNCTION_ARGS)
* statistics are available within the allowed time then display
* previously published statistics if there are any. If no
* previous statistics are available then return NULL. The timer
- * is defined in milliseconds since thats what the condition
+ * is defined in milliseconds since that's what the condition
* variable sleep uses.
*/
if (ConditionVariableTimedSleep(&memCxtState[procNumber].memcxt_cv,
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index a73aac4f98c..ab6317de5ae 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -19,7 +19,7 @@
* immediately.
*
* The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are
- * permanentaly set to "C", and then we use temporary locale_t
+ * permanently set to "C", and then we use temporary locale_t
* objects when we need to look up locale data based on the GUCs
* of the same name. Information is cached when the GUCs change.
* The cached information is only used by the formatting functions
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 3b681647060..9bcbc4c3e97 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -1271,7 +1271,7 @@ UpdateCachedPlan(CachedPlanSource *plansource, int query_index,
/*
* XXX Should this also (re)set the properties of the CachedPlan that are
* set in BuildCachedPlan() after creating the fresh plans such as
- * planRoleId, dependsOnRole, and save_xmin?
+ * planRoleId, dependsOnRole, and saved_xmin?
*/
/*
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index e9aab36d110..468d0250b2e 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -910,7 +910,7 @@ MemoryContextStatsDetail(MemoryContext context,
*
* Print stats for this context if possible, but in any case accumulate counts
* into *totals (if not NULL). The callers should make sure that print_location
- * is set to PRINT_STATS_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
+ * is set to PRINT_STATS_TO_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
*/
static void
MemoryContextStatsInternal(MemoryContext context, int level,
diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl
index d6821c5615f..5acd49f1559 100644
--- a/src/bin/pg_dump/t/006_pg_dumpall.pl
+++ b/src/bin/pg_dump/t/006_pg_dumpall.pl
@@ -384,7 +384,7 @@ $node->command_fails_like(
'--format' => 'custom',
'-d' => 'dbpq', ],
qr/\Qpg_restore: error: could not connect to database "dbpq"\E/,
- 'When non-exist datbase is given with -d option in pg_restore with dump of pg_dumpall');
+ 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall');
$node->stop('fast');
diff --git a/src/bin/pg_upgrade/relfilenumber.c b/src/bin/pg_upgrade/relfilenumber.c
index aa205aec51d..79bc474a0bb 100644
--- a/src/bin/pg_upgrade/relfilenumber.c
+++ b/src/bin/pg_upgrade/relfilenumber.c
@@ -36,7 +36,7 @@ static void transfer_relfile(FileNameMap *map, const char *type_suffix, bool vm_
*
* // be sure to sync any remaining files in the queue
* sync_queue_sync_all();
- * synq_queue_destroy();
+ * sync_queue_destroy();
*/
#define SYNC_QUEUE_MAX_LEN (1024)
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index 78fab48349b..7d82593879d 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -72,7 +72,7 @@ sub filter_dump
# adjust_child_columns is passed to adjust_regress_dumpfile() which actually
# adjusts the dump output.
#
-# The name of the file containting adjusted dump is returned.
+# The name of the file containing adjusted dump is returned.
sub get_dump_for_comparison
{
my ($node, $db, $file_prefix, $adjust_child_columns) = @_;
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 5382a07b74d..21d660a8961 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -1459,7 +1459,7 @@ DescribeQuery(const char *query, double *elapsed_msec)
*
* If a synchronisation point is found, we can stop discarding results as
* the pipeline will switch back to a clean state. If no synchronisation
- * point is available, we need to stop when ther are no more pending
+ * point is available, we need to stop when there are no more pending
* results, otherwise, calling PQgetResult() would block.
*/
static PGresult *
diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl
index bb56b353492..75ac24a7a55 100644
--- a/src/bin/scripts/t/100_vacuumdb.pl
+++ b/src/bin/scripts/t/100_vacuumdb.pl
@@ -197,7 +197,7 @@ $node->command_fails_like(
'postgres',
],
qr/cannot vacuum specific table\(s\) and exclude schema\(s\) at the same time/,
- 'cannot use options --excludes-chema and ---table at the same time');
+ 'cannot use options --exclude-schema and ---table at the same time');
$node->command_fails_like(
[
'vacuumdb',
diff --git a/src/include/access/gin_tuple.h b/src/include/access/gin_tuple.h
index ce555031335..4a50565960f 100644
--- a/src/include/access/gin_tuple.h
+++ b/src/include/access/gin_tuple.h
@@ -7,8 +7,8 @@
* src/include/access/gin.h
*--------------------------------------------------------------------------
*/
-#ifndef GIN_TUPLE_
-#define GIN_TUPLE_
+#ifndef GIN_TUPLE_H
+#define GIN_TUPLE_H
#include "access/ginblock.h"
#include "storage/itemptr.h"
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index bb678bdcdcd..011e5a811c3 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -1592,7 +1592,7 @@ typedef struct
* equivalent and closely-related orderings. (See optimizer/README for more
* information.)
*
- * Note: pk_strategy is either COMPARE_LT (for ASC) or COMPARE_GT (for DESC).
+ * Note: pk_cmptype is either COMPARE_LT (for ASC) or COMPARE_GT (for DESC).
*/
typedef struct PathKey
{
diff --git a/src/include/storage/aio_subsys.h b/src/include/storage/aio_subsys.h
index 8a8ce87f62a..0cf36bb35da 100644
--- a/src/include/storage/aio_subsys.h
+++ b/src/include/storage/aio_subsys.h
@@ -31,7 +31,7 @@ extern void pgaio_error_cleanup(void);
extern void AtEOXact_Aio(bool is_commit);
-/* aio_worker.c */
+/* method_worker.c */
extern bool pgaio_workers_enabled(void);
#endif /* AIO_SUBSYS_H */
diff --git a/src/interfaces/libpq/fe-auth-oauth-curl.c b/src/interfaces/libpq/fe-auth-oauth-curl.c
index ddd87dcf02d..c195e00cd28 100644
--- a/src/interfaces/libpq/fe-auth-oauth-curl.c
+++ b/src/interfaces/libpq/fe-auth-oauth-curl.c
@@ -1205,7 +1205,7 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
res = epoll_ctl(actx->mux, op, socket, &ev);
if (res < 0 && errno == EEXIST)
{
- /* We already had this socket in the pollset. */
+ /* We already had this socket in the poll set. */
op = EPOLL_CTL_MOD;
res = epoll_ctl(actx->mux, op, socket, &ev);
}
diff --git a/src/interfaces/libpq/fe-cancel.c b/src/interfaces/libpq/fe-cancel.c
index e84e64bf2a7..25de2a337c9 100644
--- a/src/interfaces/libpq/fe-cancel.c
+++ b/src/interfaces/libpq/fe-cancel.c
@@ -50,7 +50,7 @@ struct pg_cancel
* retransmits */
/* Pre-constructed cancel request packet starts here */
- int32 cancel_pkt_len; /* in network-byte-order */
+ int32 cancel_pkt_len; /* in network byte order */
char cancel_req[FLEXIBLE_ARRAY_MEMBER]; /* CancelRequestPacket */
};
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index bd86de53c67..e08b344276f 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -693,7 +693,7 @@ pqDropServerData(PGconn *conn)
conn->oauth_want_retry = false;
/*
- * Cancel connections need to retain their be_pid and be_key across
+ * Cancel connections need to retain their be_pid and be_cancel_key across
* PQcancelReset invocations, otherwise they would not have access to the
* secret token of the connection they are supposed to cancel.
*/
diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c
index d85910f41fc..ca19e654a1c 100644
--- a/src/interfaces/libpq/fe-protocol3.c
+++ b/src/interfaces/libpq/fe-protocol3.c
@@ -1486,7 +1486,7 @@ pqGetNegotiateProtocolVersion3(PGconn *conn)
return 0;
eof:
- libpq_append_conn_error(conn, "received invalid protocol negotation message: message too short");
+ libpq_append_conn_error(conn, "received invalid protocol negotiation message: message too short");
failure:
conn->asyncStatus = PGASYNC_READY;
pqSaveErrorResult(conn);
diff --git a/src/port/pg_localeconv_r.c b/src/port/pg_localeconv_r.c
index 938b4f82e12..4554ab84e9b 100644
--- a/src/port/pg_localeconv_r.c
+++ b/src/port/pg_localeconv_r.c
@@ -216,7 +216,7 @@ pg_localeconv_copy_members(struct lconv *dst,
* implied by the LC_MONETARY or LC_NUMERIC locale name. On Windows, LC_CTYPE
* has to match to get sane results.
*
- * To get predicable results on all platforms, we'll call the underlying
+ * To get predictable results on all platforms, we'll call the underlying
* routines with LC_ALL set to the appropriate locale for each set of members,
* and merge the results. Three members of the resulting object are therefore
* guaranteed to be encoded with LC_NUMERIC's codeset: "decimal_point",
@@ -224,7 +224,7 @@ pg_localeconv_copy_members(struct lconv *dst,
* LC_MONETARY's codeset.
*
* Returns 0 on success. Returns non-zero on failure, and sets errno. On
- * success, the caller is responsible for calling pg_localeconf_free() on the
+ * success, the caller is responsible for calling pg_localeconv_free() on the
* output struct to free the string members it contains.
*/
int
diff --git a/src/port/pg_popcount_aarch64.c b/src/port/pg_popcount_aarch64.c
index bed0f7ab242..e515e4d45b8 100644
--- a/src/port/pg_popcount_aarch64.c
+++ b/src/port/pg_popcount_aarch64.c
@@ -457,7 +457,7 @@ pg_popcount_masked_neon(const char *buf, int bytes, bits8 mask)
popcnt += vaddvq_u64(vaddq_u64(accum3, accum4));
/*
- * Process remining 8-byte blocks.
+ * Process remaining 8-byte blocks.
*/
for (; bytes >= sizeof(uint64); bytes -= sizeof(uint64))
{
diff --git a/src/test/modules/oauth_validator/t/002_client.pl b/src/test/modules/oauth_validator/t/002_client.pl
index 54769f12f57..8dd502f41e1 100644
--- a/src/test/modules/oauth_validator/t/002_client.pl
+++ b/src/test/modules/oauth_validator/t/002_client.pl
@@ -47,8 +47,7 @@ local all test oauth issuer="$issuer" scope="$scope"
});
$node->reload;
-my ($log_start, $log_end);
-$log_start = $node->wait_for_log(qr/reloading configuration files/);
+my $log_start = $node->wait_for_log(qr/reloading configuration files/);
$ENV{PGOAUTHDEBUG} = "UNSAFE";
diff --git a/src/test/modules/test_aio/t/001_aio.pl b/src/test/modules/test_aio/t/001_aio.pl
index ef4e5247e5b..4527c70785d 100644
--- a/src/test/modules/test_aio/t/001_aio.pl
+++ b/src/test/modules/test_aio/t/001_aio.pl
@@ -560,7 +560,7 @@ INSERT INTO tmp_ok SELECT generate_series(1, 10000);
qr/^t$/,
qr/^$/);
- # Because local buffers don't use IO_IN_PROGRESS, a second StartLocalBufer
+ # Because local buffers don't use IO_IN_PROGRESS, a second StartLocalBufferIO
# succeeds as well. This test mostly serves as a documentation of that
# fact. If we had actually started IO, it'd be different.
psql_like(
diff --git a/src/test/recovery/t/035_standby_logical_decoding.pl b/src/test/recovery/t/035_standby_logical_decoding.pl
index b85a4a4eda6..921813483e3 100644
--- a/src/test/recovery/t/035_standby_logical_decoding.pl
+++ b/src/test/recovery/t/035_standby_logical_decoding.pl
@@ -799,7 +799,7 @@ $logstart = -s $node_standby->logfile;
reactive_slots_change_hfs_and_wait_for_xmins('no_conflict_', 'pruning_', 0,
0);
-# Injection_point avoids seeing a xl_running_xacts. This is required because if
+# Injection point avoids seeing a xl_running_xacts. This is required because if
# it is generated between the last two updates, then the catalog_xmin of the
# active slot could be updated, and hence, the conflict won't occur. See
# comments atop wait_until_vacuum_can_remove.
diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl
index de846193bb5..7d12bcbddb6 100644
--- a/src/test/subscription/t/007_ddl.pl
+++ b/src/test/subscription/t/007_ddl.pl
@@ -80,7 +80,7 @@ $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION mysub1");
# Test ALTER PUBLICATION RENAME command during the replication
#
-# Test function for swaping name of publications
+# Test function for swapping name of publications
sub test_swap
{
my ($table_name, $pubname, $appname) = @_;