author	Peter Geoghegan	2022-09-20 02:18:36 +0000
committer	Peter Geoghegan	2022-09-20 02:18:36 +0000
commit	bfcf1b34805f70df48eedeec237230d0cc1154a6 (patch)
tree	baf824c317ecf02c49d758e75e72d69c5b6a4ec6 /src/backend/executor/nodeHashjoin.c
parent	c47885bd8b6997ccb0cc1997f61f6f98a79ce32a (diff)
Harmonize parameter names in storage and AM code.
Make sure that function declarations use names that exactly match the corresponding names from function definitions in storage, catalog, access method, executor, and logical replication code, as well as in miscellaneous utility/library code.

Like other recent commits that cleaned up function parameter names, this commit was written with help from clang-tidy. Later commits will do the same for other parts of the codebase.

Author: Peter Geoghegan <[email protected]>
Reviewed-By: David Rowley <[email protected]>
Discussion: https://postgr.es/m/CAH2-WznJt9CMM9KJTMjJh_zbL5hD9oX44qdJ4aqZtjFi-zA3Tg@mail.gmail.com
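For context, the sketch below is not part of the commit or of PostgreSQL; it only illustrates the kind of mismatch being harmonized, a declaration whose parameter name differs from the one in the definition. FooState and FooReInitialize are hypothetical names, and it is an assumption that clang-tidy's readability-inconsistent-declaration-parameter-name check was the specific diagnostic involved; the commit message only says the work was done with help from clang-tidy.

/*
 * Hypothetical example (not PostgreSQL code) of a declaration/definition
 * parameter-name mismatch, the pattern corrected in the hunks below.
 */
typedef struct FooState FooState;

/* declaration, as it might appear in a header: parameter named "node" */
extern void FooReInitialize(FooState *node);

/*
 * definition: parameter named "state"; after harmonizing, the
 * declaration above would also use "state"
 */
void
FooReInitialize(FooState *state)
{
	(void) state;				/* empty body, for illustration only */
}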
Diffstat (limited to 'src/backend/executor/nodeHashjoin.c')
-rw-r--r--	src/backend/executor/nodeHashjoin.c	6
1 files changed, 3 insertions, 3 deletions
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 87403e24781..2718c2113f5 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -145,7 +145,7 @@ static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
TupleTableSlot *tupleSlot);
static bool ExecHashJoinNewBatch(HashJoinState *hjstate);
static bool ExecParallelHashJoinNewBatch(HashJoinState *hjstate);
-static void ExecParallelHashJoinPartitionOuter(HashJoinState *node);
+static void ExecParallelHashJoinPartitionOuter(HashJoinState *hjstate);
/* ----------------------------------------------------------------
@@ -1502,11 +1502,11 @@ ExecHashJoinInitializeDSM(HashJoinState *state, ParallelContext *pcxt)
* ----------------------------------------------------------------
*/
void
-ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *cxt)
+ExecHashJoinReInitializeDSM(HashJoinState *state, ParallelContext *pcxt)
{
int plan_node_id = state->js.ps.plan->plan_node_id;
ParallelHashJoinState *pstate =
- shm_toc_lookup(cxt->toc, plan_node_id, false);
+ shm_toc_lookup(pcxt->toc, plan_node_id, false);
/*
* It would be possible to reuse the shared hash table in single-batch