author     Michael Paquier    2019-06-17 07:13:16 +0000
committer  Michael Paquier    2019-06-17 07:13:16 +0000
commit3412030205211079f9b0510e2244083e4ee7b15a (patch)
tree89b607651122d058e79704cd15764db20b8a3025 /src/backend/executor
parent9d20b0ec8f2af43041b1a65e5fcd91acc47e9ace (diff)
Fix more typos and inconsistencies in the tree
Author: Alexander Lakhin
Discussion: https://postgr.es/m/[email protected]
Diffstat (limited to 'src/backend/executor')
 src/backend/executor/execMain.c       |  5 +++--
 src/backend/executor/execUtils.c      |  2 +-
 src/backend/executor/nodeHash.c       |  8 ++++----
 src/backend/executor/nodeProjectSet.c | 11 ++++++-----
 4 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index a2bd9a7859b..27f03455152 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2876,8 +2876,9 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
/*
* Each EState must have its own es_epqScanDone state, but if we have
- * nested EPQ checks they should share es_epqTuple arrays. This allows
- * sub-rechecks to inherit the values being examined by an outer recheck.
+ * nested EPQ checks they should share es_epqTupleSlot arrays. This
+ * allows sub-rechecks to inherit the values being examined by an outer
+ * recheck.
*/
estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
if (parentestate->es_epqTupleSlot != NULL)
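The comment corrected above states a concrete ownership rule: every EState
gets a private es_epqScanDone array, while the es_epqTupleSlot array is
inherited from the parent so that nested rechecks examine the same tuples as
the outer one. Below is a minimal standalone C sketch of that
share-vs-allocate pattern; it is not PostgreSQL source, and EPQCheckState,
make_check_state, and NSLOTS are hypothetical names used only for
illustration.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define NSLOTS 4                        /* stand-in for rtsize */

typedef struct EPQCheckState
{
    bool   *scan_done;                  /* private, like es_epqScanDone */
    void  **tuple_slots;                /* shareable, like es_epqTupleSlot */
} EPQCheckState;

static EPQCheckState *
make_check_state(EPQCheckState *parent)
{
    EPQCheckState *st = malloc(sizeof(EPQCheckState));

    /* Each state always gets its own "scan done" flags. */
    st->scan_done = calloc(NSLOTS, sizeof(bool));
    /* But a nested check reuses the parent's slot array, so inner
     * rechecks see the values the outer recheck is examining. */
    if (parent != NULL && parent->tuple_slots != NULL)
        st->tuple_slots = parent->tuple_slots;
    else
        st->tuple_slots = calloc(NSLOTS, sizeof(void *));
    return st;
}

int
main(void)
{
    EPQCheckState *outer = make_check_state(NULL);
    EPQCheckState *inner = make_check_state(outer);

    printf("slot arrays shared: %s\n",
           inner->tuple_slots == outer->tuple_slots ? "yes" : "no");
    return 0;
}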
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 2835a01e15a..9b866a5dd66 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -642,7 +642,7 @@ ExecAssignScanType(ScanState *scanstate, TupleDesc tupDesc)
}
/* ----------------
- * ExecCreateSlotFromOuterPlan
+ * ExecCreateScanSlotFromOuterPlan
* ----------------
*/
void
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 3c33ce74e04..d16120b9c48 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1049,8 +1049,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
/*
* ExecParallelHashIncreaseNumBatches
- * Every participant attached to grow_barrier must run this function
- * when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
+ * Every participant attached to grow_batches_barrier must run this
+ * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
*/
static void
ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
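The rule in the corrected comment (every participant attached to
grow_batches_barrier must run the grow function once it observes
PHJ_GROWTH_NEED_MORE_BATCHES) is what keeps the repartitioning phases in lock
step across workers. The following standalone POSIX-threads sketch shows the
general "all attached workers enter the same phase function" pattern the
comment describes; the names are hypothetical and none of the real parallel
hash join machinery appears here.

#include <pthread.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_barrier_t grow_batches_barrier;
static int growth_needed = 1;   /* stand-in for PHJ_GROWTH_NEED_MORE_BATCHES */

static void
increase_num_batches(int worker)
{
    /* Phase 1: everyone arrives before repartitioning starts. */
    pthread_barrier_wait(&grow_batches_barrier);
    printf("worker %d repartitioning\n", worker);
    /* Phase 2: everyone finishes before the hash join resumes. */
    pthread_barrier_wait(&grow_batches_barrier);
}

static void *
worker_main(void *arg)
{
    int id = *(int *) arg;

    if (growth_needed)          /* every attached worker must take part */
        increase_num_batches(id);
    return NULL;
}

int
main(void)
{
    pthread_t threads[NWORKERS];
    int       ids[NWORKERS];

    pthread_barrier_init(&grow_batches_barrier, NULL, NWORKERS);
    for (int i = 0; i < NWORKERS; i++)
    {
        ids[i] = i;
        pthread_create(&threads[i], NULL, worker_main, &ids[i]);
    }
    for (int i = 0; i < NWORKERS; i++)
        pthread_join(threads[i], NULL);
    pthread_barrier_destroy(&grow_batches_barrier);
    return 0;
}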
@@ -1106,7 +1106,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
* The combined work_mem of all participants wasn't
* enough. Therefore one batch per participant would be
* approximately equivalent and would probably also be
- * insufficient. So try two batches per particiant,
+ * insufficient. So try two batches per participant,
* rounded up to a power of two.
*/
new_nbatch = 1 << my_log2(pstate->nparticipants * 2);
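The arithmetic in this hunk's context line is easy to check by hand:
my_log2(n) in PostgreSQL returns the smallest i with (1 << i) >= n, so
1 << my_log2(nparticipants * 2) yields two batches per participant rounded up
to a power of two; with 3 participants, 6 rounds up to 8. The standalone
sketch below reimplements that rounding (ceil_log2 is a local stand-in
mirroring my_log2's behavior, not the backend function itself).

#include <stdio.h>

static int
ceil_log2(long num)
{
    int  i;
    long limit;

    /* smallest i such that (1 << i) >= num */
    for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
        ;
    return i;
}

int
main(void)
{
    /* e.g. 3 participants -> 6 -> rounded up to 8 batches */
    for (int nparticipants = 1; nparticipants <= 5; nparticipants++)
        printf("%d participants -> %d batches\n",
               nparticipants, 1 << ceil_log2(nparticipants * 2L));
    return 0;
}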
@@ -1674,7 +1674,7 @@ ExecHashTableInsert(HashJoinTable hashtable,
}
/*
- * ExecHashTableParallelInsert
+ * ExecParallelHashTableInsert
* insert a tuple into a shared hash table or shared batch tuplestore
*/
void
diff --git a/src/backend/executor/nodeProjectSet.c b/src/backend/executor/nodeProjectSet.c
index 515dd61f079..277d2783711 100644
--- a/src/backend/executor/nodeProjectSet.c
+++ b/src/backend/executor/nodeProjectSet.c
@@ -297,11 +297,12 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags)
Assert(node->plan.qual == NIL);
/*
- * Create a memory context that ExecMakeFunctionResult can use to evaluate
- * function arguments in. We can't use the per-tuple context for this
- * because it gets reset too often; but we don't want to leak evaluation
- * results into the query-lifespan context either. We use one context for
- * the arguments of all tSRFs, as they have roughly equivalent lifetimes.
+ * Create a memory context that ExecMakeFunctionResultSet can use to
+ * evaluate function arguments in. We can't use the per-tuple context for
+ * this because it gets reset too often; but we don't want to leak
+ * evaluation results into the query-lifespan context either. We use one
+ * context for the arguments of all tSRFs, as they have roughly equivalent
+ * lifetimes.
*/
state->argcontext = AllocSetContextCreate(CurrentMemoryContext,
"tSRF function arguments",