| author | Bruce Momjian | 2011-04-10 15:42:00 +0000 |
|---|---|---|
| committer | Bruce Momjian | 2011-04-10 15:42:00 +0000 |
| commit | bf50caf105a901c4f83ac1df3cdaf910c26694a4 (patch) | |
| tree | dac42d7795070f107eefb085c500f86a4d35f92f /src/backend/executor | |
| parent | 9a8b73147c07e02e10e0d0a34aa99d72e3336fb2 (diff) | |
pgindent run before PG 9.1 beta 1.
Diffstat (limited to 'src/backend/executor')
-rw-r--r-- | src/backend/executor/execMain.c | 59 |
-rw-r--r-- | src/backend/executor/execQual.c | 6 |
-rw-r--r-- | src/backend/executor/execUtils.c | 6 |
-rw-r--r-- | src/backend/executor/functions.c | 46 |
-rw-r--r-- | src/backend/executor/nodeAgg.c | 8 |
-rw-r--r-- | src/backend/executor/nodeBitmapIndexscan.c | 4 |
-rw-r--r-- | src/backend/executor/nodeForeignscan.c | 2 |
-rw-r--r-- | src/backend/executor/nodeHash.c | 16 |
-rw-r--r-- | src/backend/executor/nodeHashjoin.c | 53 |
-rw-r--r-- | src/backend/executor/nodeIndexscan.c | 20 |
-rw-r--r-- | src/backend/executor/nodeLimit.c | 14 |
-rw-r--r-- | src/backend/executor/nodeLockRows.c | 2 |
-rw-r--r-- | src/backend/executor/nodeMergeAppend.c | 36 |
-rw-r--r-- | src/backend/executor/nodeMergejoin.c | 2 |
-rw-r--r-- | src/backend/executor/nodeModifyTable.c | 40 |
-rw-r--r-- | src/backend/executor/nodeNestloop.c | 11 |
-rw-r--r-- | src/backend/executor/nodeRecursiveunion.c | 2 |
-rw-r--r-- | src/backend/executor/nodeSetOp.c | 2 |
-rw-r--r-- | src/backend/executor/nodeWindowAgg.c | 2 |
-rw-r--r-- | src/backend/executor/spi.c | 10 |
20 files changed, 172 insertions, 169 deletions
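Every hunk in this commit is a mechanical layout change from pgindent: comment blocks reflowed to the standard width, a single space kept around `*` in multiplications and after casts, and declarations re-aligned. No behavior changes. As a rough sketch of the style pgindent enforces, the fragment below uses hypothetical types (`ExampleParamList`, `ExampleParam`) that are not part of this commit; only the `(nargs - 1) * sizeof(...)` spacing it demonstrates is taken from the diff itself.

```c
#include <stdlib.h>

/*
 * Hypothetical structs, loosely patterned on the header-plus-array idiom
 * seen in functions.c and spi.c; purely for illustration.
 */
typedef struct ExampleParam
{
	int			value;			/* one parameter slot */
} ExampleParam;

typedef struct ExampleParamList
{
	int			numParams;		/* number of slots that follow */
	ExampleParam params[1];		/* VARIABLE LENGTH ARRAY */
} ExampleParamList;

static ExampleParamList *
example_alloc(int nargs)
{
	ExampleParamList *list;

	/*
	 * pgindent writes the allocation as "(nargs - 1) * sizeof(...)",
	 * with a space on both sides of the "*", rather than the pre-run
	 * "(nargs - 1) *sizeof(...)" spelling removed by this diff.
	 */
	list = (ExampleParamList *) malloc(sizeof(ExampleParamList) +
									   (nargs - 1) * sizeof(ExampleParam));
	if (list != NULL)
		list->numParams = nargs;
	return list;
}

int
main(void)
{
	ExampleParamList *list = example_alloc(4);

	free(list);
	return 0;
}
```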
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index caa9faea87f..86ec9870198 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -19,7 +19,7 @@ * ExecutorRun accepts direction and count arguments that specify whether * the plan is to be executed forwards, backwards, and for how many tuples. * In some cases ExecutorRun may be called multiple times to process all - * the tuples for a plan. It is also acceptable to stop short of executing + * the tuples for a plan. It is also acceptable to stop short of executing * the whole plan (but only if it is a SELECT). * * ExecutorFinish must be called after the final ExecutorRun call and @@ -168,6 +168,7 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags) switch (queryDesc->operation) { case CMD_SELECT: + /* * SELECT INTO, SELECT FOR UPDATE/SHARE and modifying CTEs need to * mark tuples @@ -332,12 +333,12 @@ standard_ExecutorRun(QueryDesc *queryDesc, * ExecutorFinish * * This routine must be called after the last ExecutorRun call. - * It performs cleanup such as firing AFTER triggers. It is + * It performs cleanup such as firing AFTER triggers. It is * separate from ExecutorEnd because EXPLAIN ANALYZE needs to * include these actions in the total runtime. * * We provide a function hook variable that lets loadable plugins - * get control when ExecutorFinish is called. Such a plugin would + * get control when ExecutorFinish is called. Such a plugin would * normally call standard_ExecutorFinish(). * * ---------------------------------------------------------------- @@ -425,9 +426,9 @@ standard_ExecutorEnd(QueryDesc *queryDesc) Assert(estate != NULL); /* - * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. - * This Assert is needed because ExecutorFinish is new as of 9.1, and - * callers might forget to call it. + * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This + * Assert is needed because ExecutorFinish is new as of 9.1, and callers + * might forget to call it. 
*/ Assert(estate->es_finished || (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY)); @@ -519,7 +520,7 @@ ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) foreach(l, rangeTable) { - RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); + RangeTblEntry *rte = (RangeTblEntry *) lfirst(l); result = ExecCheckRTEPerms(rte); if (!result) @@ -533,8 +534,8 @@ ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation) } if (ExecutorCheckPerms_hook) - result = (*ExecutorCheckPerms_hook)(rangeTable, - ereport_on_violation); + result = (*ExecutorCheckPerms_hook) (rangeTable, + ereport_on_violation); return result; } @@ -980,7 +981,7 @@ InitPlan(QueryDesc *queryDesc, int eflags) void CheckValidResultRel(Relation resultRel, CmdType operation) { - TriggerDesc *trigDesc = resultRel->trigdesc; + TriggerDesc *trigDesc = resultRel->trigdesc; switch (resultRel->rd_rel->relkind) { @@ -1005,26 +1006,26 @@ CheckValidResultRel(Relation resultRel, CmdType operation) case CMD_INSERT: if (!trigDesc || !trigDesc->trig_insert_instead_row) ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot insert into view \"%s\"", - RelationGetRelationName(resultRel)), - errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger."))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot insert into view \"%s\"", + RelationGetRelationName(resultRel)), + errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger."))); break; case CMD_UPDATE: if (!trigDesc || !trigDesc->trig_update_instead_row) ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot update view \"%s\"", - RelationGetRelationName(resultRel)), - errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger."))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot update view \"%s\"", + RelationGetRelationName(resultRel)), + errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger."))); break; case CMD_DELETE: if (!trigDesc || !trigDesc->trig_delete_instead_row) ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("cannot delete from view \"%s\"", - RelationGetRelationName(resultRel)), - errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger."))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("cannot delete from view \"%s\"", + RelationGetRelationName(resultRel)), + errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger."))); break; default: elog(ERROR, "unrecognized CmdType: %d", (int) operation); @@ -1137,8 +1138,8 @@ ExecGetTriggerResultRel(EState *estate, Oid relid) /* * Open the target relation's relcache entry. We assume that an * appropriate lock is still held by the backend from whenever the trigger - * event got queued, so we need take no new lock here. Also, we need - * not recheck the relkind, so no need for CheckValidResultRel. + * event got queued, so we need take no new lock here. Also, we need not + * recheck the relkind, so no need for CheckValidResultRel. */ rel = heap_open(relid, NoLock); @@ -1238,12 +1239,12 @@ ExecPostprocessPlan(EState *estate) /* * Run any secondary ModifyTable nodes to completion, in case the main - * query did not fetch all rows from them. (We do this to ensure that + * query did not fetch all rows from them. (We do this to ensure that * such nodes have predictable results.) 
*/ foreach(lc, estate->es_auxmodifytables) { - PlanState *ps = (PlanState *) lfirst(lc); + PlanState *ps = (PlanState *) lfirst(lc); for (;;) { @@ -2220,9 +2221,9 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) * ExecInitSubPlan expects to be able to find these entries. Some of the * SubPlans might not be used in the part of the plan tree we intend to * run, but since it's not easy to tell which, we just initialize them - * all. (However, if the subplan is headed by a ModifyTable node, then - * it must be a data-modifying CTE, which we will certainly not need to - * re-run, so we can skip initializing it. This is just an efficiency + * all. (However, if the subplan is headed by a ModifyTable node, then it + * must be a data-modifying CTE, which we will certainly not need to + * re-run, so we can skip initializing it. This is just an efficiency * hack; it won't skip data-modifying CTEs for which the ModifyTable node * is not at the top.) */ diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index c153ca00dbf..5f0b58f43b7 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -79,9 +79,9 @@ static Datum ExecEvalWholeRowSlow(ExprState *exprstate, ExprContext *econtext, static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalParamExec(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalParamExtern(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static void init_fcache(Oid foid, Oid input_collation, FuncExprState *fcache, MemoryContext fcacheCxt, bool needDescForSets); static void ShutdownFuncExpr(Datum arg); @@ -1043,7 +1043,7 @@ ExecEvalParamExtern(ExprState *exprstate, ExprContext *econtext, ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("no value found for parameter %d", thisParamId))); - return (Datum) 0; /* keep compiler quiet */ + return (Datum) 0; /* keep compiler quiet */ } diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index 7e84ccdd9cd..0cbbe04d3bd 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -1319,9 +1319,9 @@ retry: /* * Ordinarily, at this point the search should have found the originally * inserted tuple, unless we exited the loop early because of conflict. - * However, it is possible to define exclusion constraints for which - * that wouldn't be true --- for instance, if the operator is <>. - * So we no longer complain if found_self is still false. + * However, it is possible to define exclusion constraints for which that + * wouldn't be true --- for instance, if the operator is <>. So we no + * longer complain if found_self is still false. 
*/ econtext->ecxt_scantuple = save_scantuple; diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index 70d126c5213..9c867bbae20 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -81,7 +81,7 @@ typedef struct char *fname; /* function name (for error msgs) */ char *src; /* function body text (for error msgs) */ - SQLFunctionParseInfoPtr pinfo; /* data for parser callback hooks */ + SQLFunctionParseInfoPtr pinfo; /* data for parser callback hooks */ Oid rettype; /* actual return type */ int16 typlen; /* length of the return type */ @@ -119,7 +119,7 @@ typedef struct SQLFunctionParseInfo Oid *argtypes; /* resolved types of input arguments */ int nargs; /* number of input arguments */ Oid collation; /* function's input collation, if known */ -} SQLFunctionParseInfo; +} SQLFunctionParseInfo; /* non-export function prototypes */ @@ -255,7 +255,7 @@ sql_fn_param_ref(ParseState *pstate, ParamRef *pref) * Set up the per-query execution_state records for a SQL function. * * The input is a List of Lists of parsed and rewritten, but not planned, - * querytrees. The sublist structure denotes the original query boundaries. + * querytrees. The sublist structure denotes the original query boundaries. */ static List * init_execution_state(List *queryTree_list, @@ -299,8 +299,8 @@ init_execution_state(List *queryTree_list, ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), /* translator: %s is a SQL statement name */ - errmsg("%s is not allowed in a non-volatile function", - CreateCommandTag(stmt)))); + errmsg("%s is not allowed in a non-volatile function", + CreateCommandTag(stmt)))); /* OK, build the execution_state for this query */ newes = (execution_state *) palloc(sizeof(execution_state)); @@ -311,8 +311,8 @@ init_execution_state(List *queryTree_list, newes->next = NULL; newes->status = F_EXEC_START; - newes->setsResult = false; /* might change below */ - newes->lazyEval = false; /* might change below */ + newes->setsResult = false; /* might change below */ + newes->lazyEval = false; /* might change below */ newes->stmt = stmt; newes->qd = NULL; @@ -442,7 +442,7 @@ init_sql_fcache(FmgrInfo *finfo, bool lazyEvalOK) fcache->src = TextDatumGetCString(tmp); /* - * Parse and rewrite the queries in the function text. Use sublists to + * Parse and rewrite the queries in the function text. Use sublists to * keep track of the original query boundaries. But we also build a * "flat" list of the rewritten queries to pass to check_sql_fn_retval. 
* This is because the last canSetTag query determines the result type @@ -462,7 +462,7 @@ init_sql_fcache(FmgrInfo *finfo, bool lazyEvalOK) queryTree_sublist = pg_analyze_and_rewrite_params(parsetree, fcache->src, - (ParserSetupHook) sql_fn_parser_setup, + (ParserSetupHook) sql_fn_parser_setup, fcache->pinfo); queryTree_list = lappend(queryTree_list, queryTree_sublist); flat_query_list = list_concat(flat_query_list, @@ -657,7 +657,7 @@ postquel_sub_params(SQLFunctionCachePtr fcache, { /* sizeof(ParamListInfoData) includes the first array element */ paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) + - (nargs - 1) *sizeof(ParamExternData)); + (nargs - 1) * sizeof(ParamExternData)); /* we have static list of params, so no hooks needed */ paramLI->paramFetch = NULL; paramLI->paramFetchArg = NULL; @@ -748,8 +748,8 @@ fmgr_sql(PG_FUNCTION_ARGS) execution_state *es; TupleTableSlot *slot; Datum result; - List *eslist; - ListCell *eslc; + List *eslist; + ListCell *eslc; /* * Switch to context in which the fcache lives. This ensures that @@ -847,10 +847,10 @@ fmgr_sql(PG_FUNCTION_ARGS) * * In a non-read-only function, we rely on the fact that we'll never * suspend execution between queries of the function: the only reason to - * suspend execution before completion is if we are returning a row from - * a lazily-evaluated SELECT. So, when first entering this loop, we'll + * suspend execution before completion is if we are returning a row from a + * lazily-evaluated SELECT. So, when first entering this loop, we'll * either start a new query (and push a fresh snapshot) or re-establish - * the active snapshot from the existing query descriptor. If we need to + * the active snapshot from the existing query descriptor. If we need to * start a new query in a subsequent execution of the loop, either we need * a fresh snapshot (and pushed_snapshot is false) or the existing * snapshot is on the active stack and we can just bump its command ID. @@ -927,10 +927,10 @@ fmgr_sql(PG_FUNCTION_ARGS) es = (execution_state *) lfirst(eslc); /* - * Flush the current snapshot so that we will take a new one - * for the new query list. This ensures that new snaps are - * taken at original-query boundaries, matching the behavior - * of interactive execution. + * Flush the current snapshot so that we will take a new one for + * the new query list. This ensures that new snaps are taken at + * original-query boundaries, matching the behavior of interactive + * execution. */ if (pushed_snapshot) { @@ -1183,7 +1183,7 @@ ShutdownSQLFunction(Datum arg) { SQLFunctionCachePtr fcache = (SQLFunctionCachePtr) DatumGetPointer(arg); execution_state *es; - ListCell *lc; + ListCell *lc; foreach(lc, fcache->func_state) { @@ -1415,7 +1415,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, * the function that's calling it. * * XXX Note that if rettype is RECORD, the IsBinaryCoercible check - * will succeed for any composite restype. For the moment we rely on + * will succeed for any composite restype. For the moment we rely on * runtime type checking to catch any discrepancy, but it'd be nice to * do better at parse time. 
*/ @@ -1432,7 +1432,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, tle->expr = (Expr *) makeRelabelType(tle->expr, rettype, -1, - get_typcollation(rettype), + get_typcollation(rettype), COERCE_DONTCARE); /* Relabel is dangerous if sort/group or setop column */ if (tle->ressortgroupref != 0 || parse->setOperations) @@ -1536,7 +1536,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, tle->expr = (Expr *) makeRelabelType(tle->expr, atttype, -1, - get_typcollation(atttype), + get_typcollation(atttype), COERCE_DONTCARE); /* Relabel is dangerous if sort/group or setop column */ if (tle->ressortgroupref != 0 || parse->setOperations) diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 51b1228c26f..47555bab55b 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -199,7 +199,7 @@ typedef struct AggStatePerAggData */ Tuplesortstate *sortstate; /* sort object, if DISTINCT or ORDER BY */ -} AggStatePerAggData; +} AggStatePerAggData; /* * AggStatePerGroupData - per-aggregate-per-group working state @@ -246,7 +246,7 @@ typedef struct AggHashEntryData TupleHashEntryData shared; /* common header for hash table entries */ /* per-aggregate transition status array - must be last! */ AggStatePerGroupData pergroup[1]; /* VARIABLE LENGTH ARRAY */ -} AggHashEntryData; /* VARIABLE LENGTH STRUCT */ +} AggHashEntryData; /* VARIABLE LENGTH STRUCT */ static void initialize_aggregates(AggState *aggstate, @@ -827,7 +827,7 @@ build_hash_table(AggState *aggstate) Assert(node->numGroups > 0); entrysize = sizeof(AggHashEntryData) + - (aggstate->numaggs - 1) *sizeof(AggStatePerGroupData); + (aggstate->numaggs - 1) * sizeof(AggStatePerGroupData); aggstate->hashtable = BuildTupleHashTable(node->numCols, node->grpColIdx, @@ -899,7 +899,7 @@ hash_agg_entry_size(int numAggs) /* This must match build_hash_table */ entrysize = sizeof(AggHashEntryData) + - (numAggs - 1) *sizeof(AggStatePerGroupData); + (numAggs - 1) * sizeof(AggStatePerGroupData); entrysize = MAXALIGN(entrysize); /* Account for hashtable overhead (assuming fill factor = 1) */ entrysize += 3 * sizeof(void *); diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c index 90ff0403ab0..4de54ea55f6 100644 --- a/src/backend/executor/nodeBitmapIndexscan.c +++ b/src/backend/executor/nodeBitmapIndexscan.c @@ -307,8 +307,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags) indexstate->biss_NumScanKeys); /* - * If no run-time keys to calculate, go ahead and pass the scankeys to - * the index AM. + * If no run-time keys to calculate, go ahead and pass the scankeys to the + * index AM. 
*/ if (indexstate->biss_NumRuntimeKeys == 0 && indexstate->biss_NumArrayKeys == 0) diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index c4309a981e2..d50489c7f4c 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -40,7 +40,7 @@ static TupleTableSlot * ForeignNext(ForeignScanState *node) { TupleTableSlot *slot; - ForeignScan *plan = (ForeignScan *) node->ss.ps.plan; + ForeignScan *plan = (ForeignScan *) node->ss.ps.plan; ExprContext *econtext = node->ss.ps.ps_ExprContext; MemoryContext oldcontext; diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 295563011fe..1af98c81a69 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -960,13 +960,11 @@ void ExecPrepHashTableForUnmatched(HashJoinState *hjstate) { /* - *---------- - * During this scan we use the HashJoinState fields as follows: + * ---------- During this scan we use the HashJoinState fields as follows: * - * hj_CurBucketNo: next regular bucket to scan - * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums) - * hj_CurTuple: last tuple returned, or NULL to start next bucket - *---------- + * hj_CurBucketNo: next regular bucket to scan hj_CurSkewBucketNo: next + * skew bucket (an index into skewBucketNums) hj_CurTuple: last tuple + * returned, or NULL to start next bucket ---------- */ hjstate->hj_CurBucketNo = 0; hjstate->hj_CurSkewBucketNo = 0; @@ -1003,7 +1001,7 @@ ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext) } else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets) { - int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo]; + int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo]; hashTuple = hashtable->skewBucket[j]->tuples; hjstate->hj_CurSkewBucketNo++; @@ -1020,7 +1018,7 @@ ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext) /* insert hashtable's tuple into exec slot */ inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple), hjstate->hj_HashTupleSlot, - false); /* do not pfree */ + false); /* do not pfree */ econtext->ecxt_innertuple = inntuple; /* @@ -1091,7 +1089,7 @@ ExecHashTableResetMatchFlags(HashJoinTable hashtable) /* ... and the same for the skew buckets, if any */ for (i = 0; i < hashtable->nSkewBuckets; i++) { - int j = hashtable->skewBucketNums[i]; + int j = hashtable->skewBucketNums[i]; HashSkewBucket *skewBucket = hashtable->skewBucket[j]; for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next) diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index a6847c956f4..7c02db94adb 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -113,6 +113,7 @@ ExecHashJoin(HashJoinState *node) switch (node->hj_JoinState) { case HJ_BUILD_HASHTABLE: + /* * First time through: build hash table for inner relation. */ @@ -123,12 +124,12 @@ ExecHashJoin(HashJoinState *node) * right/full join, we can quit without building the hash * table. However, for an inner join it is only a win to * check this when the outer relation's startup cost is less - * than the projected cost of building the hash - * table. Otherwise it's best to build the hash table first - * and see if the inner relation is empty. (When it's a left - * join, we should always make this check, since we aren't - * going to be able to skip the join on the strength of an - * empty inner relation anyway.) 
+ * than the projected cost of building the hash table. + * Otherwise it's best to build the hash table first and see + * if the inner relation is empty. (When it's a left join, we + * should always make this check, since we aren't going to be + * able to skip the join on the strength of an empty inner + * relation anyway.) * * If we are rescanning the join, we make use of information * gained on the previous scan: don't bother to try the @@ -185,8 +186,8 @@ ExecHashJoin(HashJoinState *node) return NULL; /* - * need to remember whether nbatch has increased since we began - * scanning the outer relation + * need to remember whether nbatch has increased since we + * began scanning the outer relation */ hashtable->nbatch_outstart = hashtable->nbatch; @@ -202,6 +203,7 @@ ExecHashJoin(HashJoinState *node) /* FALL THRU */ case HJ_NEED_NEW_OUTER: + /* * We don't have an outer tuple, try to get the next one */ @@ -250,7 +252,7 @@ ExecHashJoin(HashJoinState *node) Assert(batchno > hashtable->curbatch); ExecHashJoinSaveTuple(ExecFetchSlotMinimalTuple(outerTupleSlot), hashvalue, - &hashtable->outerBatchFile[batchno]); + &hashtable->outerBatchFile[batchno]); /* Loop around, staying in HJ_NEED_NEW_OUTER state */ continue; } @@ -261,6 +263,7 @@ ExecHashJoin(HashJoinState *node) /* FALL THRU */ case HJ_SCAN_BUCKET: + /* * Scan the selected hash bucket for matches to current outer */ @@ -296,8 +299,8 @@ ExecHashJoin(HashJoinState *node) } /* - * In a semijoin, we'll consider returning the first match, - * but after that we're done with this outer tuple. + * In a semijoin, we'll consider returning the first + * match, but after that we're done with this outer tuple. */ if (node->js.jointype == JOIN_SEMI) node->hj_JoinState = HJ_NEED_NEW_OUTER; @@ -320,10 +323,11 @@ ExecHashJoin(HashJoinState *node) break; case HJ_FILL_OUTER_TUPLE: + /* * The current outer tuple has run out of matches, so check - * whether to emit a dummy outer-join tuple. Whether we - * emit one or not, the next state is NEED_NEW_OUTER. + * whether to emit a dummy outer-join tuple. Whether we emit + * one or not, the next state is NEED_NEW_OUTER. */ node->hj_JoinState = HJ_NEED_NEW_OUTER; @@ -354,6 +358,7 @@ ExecHashJoin(HashJoinState *node) break; case HJ_FILL_INNER_TUPLES: + /* * We have finished a batch, but we are doing right/full join, * so any unmatched inner tuples in the hashtable have to be @@ -389,11 +394,12 @@ ExecHashJoin(HashJoinState *node) break; case HJ_NEED_NEW_BATCH: + /* * Try to advance to next batch. Done if there are no more. */ if (!ExecHashJoinNewBatch(node)) - return NULL; /* end of join */ + return NULL; /* end of join */ node->hj_JoinState = HJ_NEED_NEW_OUTER; break; @@ -783,7 +789,7 @@ ExecHashJoinNewBatch(HashJoinState *hjstate) } if (curbatch >= nbatch) - return false; /* no more batches */ + return false; /* no more batches */ hashtable->curbatch = curbatch; @@ -829,7 +835,7 @@ ExecHashJoinNewBatch(HashJoinState *hjstate) if (BufFileSeek(hashtable->outerBatchFile[curbatch], 0, 0L, SEEK_SET)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not rewind hash-join temporary file: %m"))); + errmsg("could not rewind hash-join temporary file: %m"))); } return true; @@ -944,14 +950,13 @@ ExecReScanHashJoin(HashJoinState *node) ExecHashTableResetMatchFlags(node->hj_HashTable); /* - * Also, we need to reset our state about the emptiness of - * the outer relation, so that the new scan of the outer will - * update it correctly if it turns out to be empty this time. 
- * (There's no harm in clearing it now because ExecHashJoin won't - * need the info. In the other cases, where the hash table - * doesn't exist or we are destroying it, we leave this state - * alone because ExecHashJoin will need it the first time - * through.) + * Also, we need to reset our state about the emptiness of the + * outer relation, so that the new scan of the outer will update + * it correctly if it turns out to be empty this time. (There's no + * harm in clearing it now because ExecHashJoin won't need the + * info. In the other cases, where the hash table doesn't exist + * or we are destroying it, we leave this state alone because + * ExecHashJoin will need it the first time through.) */ node->hj_OuterNotEmpty = false; diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 3b8741fc21b..d8e59ca39e5 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -212,7 +212,7 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext, /* * For each run-time key, extract the run-time expression and evaluate - * it with respect to the current context. We then stick the result + * it with respect to the current context. We then stick the result * into the proper scan key. * * Note: the result of the eval could be a pass-by-ref value that's @@ -605,16 +605,16 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags) indexstate->iss_RelationDesc, estate->es_snapshot, indexstate->iss_NumScanKeys, - indexstate->iss_NumOrderByKeys); + indexstate->iss_NumOrderByKeys); /* - * If no run-time keys to calculate, go ahead and pass the scankeys to - * the index AM. + * If no run-time keys to calculate, go ahead and pass the scankeys to the + * index AM. */ if (indexstate->iss_NumRuntimeKeys == 0) index_rescan(indexstate->iss_ScanDesc, indexstate->iss_ScanKeys, indexstate->iss_NumScanKeys, - indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys); + indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys); /* * all done. @@ -703,11 +703,11 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid, scan_keys = (ScanKey) palloc(n_scan_keys * sizeof(ScanKeyData)); /* - * runtime_keys array is dynamically resized as needed. We handle it - * this way so that the same runtime keys array can be shared between - * indexquals and indexorderbys, which will be processed in separate - * calls of this function. Caller must be sure to pass in NULL/0 for - * first call. + * runtime_keys array is dynamically resized as needed. We handle it this + * way so that the same runtime keys array can be shared between + * indexquals and indexorderbys, which will be processed in separate calls + * of this function. Caller must be sure to pass in NULL/0 for first + * call. */ runtime_keys = *runtimeKeys; n_runtime_keys = max_runtime_keys = *numRuntimeKeys; diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c index edbe0558b70..85d1a6e27f1 100644 --- a/src/backend/executor/nodeLimit.c +++ b/src/backend/executor/nodeLimit.c @@ -346,14 +346,14 @@ pass_down_bound(LimitState *node, PlanState *child_node) else if (IsA(child_node, ResultState)) { /* - * An extra consideration here is that if the Result is projecting - * a targetlist that contains any SRFs, we can't assume that every - * input tuple generates an output tuple, so a Sort underneath - * might need to return more than N tuples to satisfy LIMIT N. - * So we cannot use bounded sort. 
+ * An extra consideration here is that if the Result is projecting a + * targetlist that contains any SRFs, we can't assume that every input + * tuple generates an output tuple, so a Sort underneath might need to + * return more than N tuples to satisfy LIMIT N. So we cannot use + * bounded sort. * - * If Result supported qual checking, we'd have to punt on seeing - * a qual, too. Note that having a resconstantqual is not a + * If Result supported qual checking, we'd have to punt on seeing a + * qual, too. Note that having a resconstantqual is not a * showstopper: if that fails we're not getting any rows at all. */ if (outerPlanState(child_node) && diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c index 2e080088071..d71278ebd72 100644 --- a/src/backend/executor/nodeLockRows.c +++ b/src/backend/executor/nodeLockRows.c @@ -291,7 +291,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) /* * Locate the ExecRowMark(s) that this node is responsible for, and - * construct ExecAuxRowMarks for them. (InitPlan should already have + * construct ExecAuxRowMarks for them. (InitPlan should already have * built the global list of ExecRowMarks.) */ lrstate->lr_arowMarks = NIL; diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index 73920f21c8c..4ebe0cbe033 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -48,8 +48,8 @@ * contains integers which index into the slots array. These typedefs try to * clear it up, but they're only documentation. */ -typedef int SlotNumber; -typedef int HeapPosition; +typedef int SlotNumber; +typedef int HeapPosition; static void heap_insert_slot(MergeAppendState *node, SlotNumber new_slot); static void heap_siftup_slot(MergeAppendState *node); @@ -128,13 +128,13 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags) * initialize sort-key information */ mergestate->ms_nkeys = node->numCols; - mergestate->ms_scankeys = palloc0(sizeof(ScanKeyData) * node->numCols); + mergestate->ms_scankeys = palloc0(sizeof(ScanKeyData) * node->numCols); for (i = 0; i < node->numCols; i++) { - Oid sortFunction; - bool reverse; - int flags; + Oid sortFunction; + bool reverse; + int flags; if (!get_compare_function_for_ordering_op(node->sortOperators[i], &sortFunction, &reverse)) @@ -187,8 +187,8 @@ ExecMergeAppend(MergeAppendState *node) if (!node->ms_initialized) { /* - * First time through: pull the first tuple from each subplan, - * and set up the heap. + * First time through: pull the first tuple from each subplan, and set + * up the heap. 
*/ for (i = 0; i < node->ms_nplans; i++) { @@ -243,7 +243,7 @@ heap_insert_slot(MergeAppendState *node, SlotNumber new_slot) j = node->ms_heap_size++; /* j is where the "hole" is */ while (j > 0) { - int i = (j-1)/2; + int i = (j - 1) / 2; if (heap_compare_slots(node, new_slot, node->ms_heap[i]) >= 0) break; @@ -269,11 +269,11 @@ heap_siftup_slot(MergeAppendState *node) i = 0; /* i is where the "hole" is */ for (;;) { - int j = 2 * i + 1; + int j = 2 * i + 1; if (j >= n) break; - if (j+1 < n && heap_compare_slots(node, heap[j], heap[j+1]) > 0) + if (j + 1 < n && heap_compare_slots(node, heap[j], heap[j + 1]) > 0) j++; if (heap_compare_slots(node, heap[n], heap[j]) <= 0) break; @@ -298,13 +298,13 @@ heap_compare_slots(MergeAppendState *node, SlotNumber slot1, SlotNumber slot2) for (nkey = 0; nkey < node->ms_nkeys; nkey++) { - ScanKey scankey = node->ms_scankeys + nkey; - AttrNumber attno = scankey->sk_attno; - Datum datum1, - datum2; - bool isNull1, - isNull2; - int32 compare; + ScanKey scankey = node->ms_scankeys + nkey; + AttrNumber attno = scankey->sk_attno; + Datum datum1, + datum2; + bool isNull1, + isNull2; + int32 compare; datum1 = slot_getattr(s1, attno, &isNull1); datum2 = slot_getattr(s2, attno, &isNull2); diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 75c3a645359..ce5462e961e 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -143,7 +143,7 @@ typedef struct MergeJoinClauseData bool reverse; /* if true, negate the cmpfn's output */ bool nulls_first; /* if true, nulls sort low */ FmgrInfo cmpfinfo; -} MergeJoinClauseData; +} MergeJoinClauseData; /* Result type for MJEvalOuterValues and MJEvalInnerValues */ typedef enum diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index f10f70a17d3..c0eab4bf0db 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -544,7 +544,7 @@ ExecUpdate(ItemPointer tupleid, * * If we generate a new candidate tuple after EvalPlanQual testing, we * must loop back here and recheck constraints. (We don't need to - * redo triggers, however. If there are any BEFORE triggers then + * redo triggers, however. If there are any BEFORE triggers then * trigger.c will have done heap_lock_tuple to lock the correct tuple, * so there's no need to do them again.) */ @@ -608,11 +608,10 @@ lreplace:; /* * Note: instead of having to update the old index tuples associated - * with the heap tuple, all we do is form and insert new index - * tuples. This is because UPDATEs are actually DELETEs and INSERTs, - * and index tuple deletion is done later by VACUUM (see notes in - * ExecDelete). All we do here is insert new index tuples. -cim - * 9/27/89 + * with the heap tuple, all we do is form and insert new index tuples. + * This is because UPDATEs are actually DELETEs and INSERTs, and index + * tuple deletion is done later by VACUUM (see notes in ExecDelete). + * All we do here is insert new index tuples. -cim 9/27/89 */ /* @@ -713,7 +712,7 @@ ExecModifyTable(ModifyTableState *node) TupleTableSlot *planSlot; ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; - HeapTupleHeader oldtuple = NULL; + HeapTupleHeader oldtuple = NULL; /* * If we've already completed processing, don't try to do more. We need @@ -740,7 +739,7 @@ ExecModifyTable(ModifyTableState *node) /* * es_result_relation_info must point to the currently active result - * relation while we are within this ModifyTable node. 
Even though + * relation while we are within this ModifyTable node. Even though * ModifyTable nodes can't be nested statically, they can be nested * dynamically (since our subplan could include a reference to a modifying * CTE). So we have to save and restore the caller's value. @@ -756,7 +755,7 @@ ExecModifyTable(ModifyTableState *node) for (;;) { /* - * Reset the per-output-tuple exprcontext. This is needed because + * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. It's a bit ugly * to do this below the top level of the plan, however. We might need * to rethink this later. @@ -806,7 +805,8 @@ ExecModifyTable(ModifyTableState *node) elog(ERROR, "ctid is NULL"); tupleid = (ItemPointer) DatumGetPointer(datum); - tuple_ctid = *tupleid; /* be sure we don't free ctid!! */ + tuple_ctid = *tupleid; /* be sure we don't free + * ctid!! */ tupleid = &tuple_ctid; } else @@ -836,11 +836,11 @@ ExecModifyTable(ModifyTableState *node) break; case CMD_UPDATE: slot = ExecUpdate(tupleid, oldtuple, slot, planSlot, - &node->mt_epqstate, estate, node->canSetTag); + &node->mt_epqstate, estate, node->canSetTag); break; case CMD_DELETE: slot = ExecDelete(tupleid, oldtuple, planSlot, - &node->mt_epqstate, estate, node->canSetTag); + &node->mt_epqstate, estate, node->canSetTag); break; default: elog(ERROR, "unknown operation"); @@ -922,9 +922,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * call ExecInitNode on each of the plans to be executed and save the - * results into the array "mt_plans". This is also a convenient place - * to verify that the proposed target relations are valid and open their - * indexes for insertion of new index entries. Note we *must* set + * results into the array "mt_plans". This is also a convenient place to + * verify that the proposed target relations are valid and open their + * indexes for insertion of new index entries. Note we *must* set * estate->es_result_relation_info correctly while we initialize each * sub-plan; ExecContextForcesOids depends on that! */ @@ -944,7 +944,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * If there are indices on the result relation, open them and save * descriptors in the result relation info, so that we can add new - * index entries for the tuples we add/update. We need not do this + * index entries for the tuples we add/update. We need not do this * for a DELETE, however, since deletion doesn't affect indexes. */ if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex && @@ -1147,10 +1147,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it * to estate->es_auxmodifytables so that it will be run to completion by * ExecPostprocessPlan. (It'd actually work fine to add the primary - * ModifyTable node too, but there's no need.) Note the use of lcons - * not lappend: we need later-initialized ModifyTable nodes to be shut - * down before earlier ones. This ensures that we don't throw away - * RETURNING rows that need to be seen by a later CTE subplan. + * ModifyTable node too, but there's no need.) Note the use of lcons not + * lappend: we need later-initialized ModifyTable nodes to be shut down + * before earlier ones. This ensures that we don't throw away RETURNING + * rows that need to be seen by a later CTE subplan. 
*/ if (!mtstate->canSetTag) estate->es_auxmodifytables = lcons(mtstate, diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c index 4893a6ea6d6..e98bc0f5a30 100644 --- a/src/backend/executor/nodeNestloop.c +++ b/src/backend/executor/nodeNestloop.c @@ -137,9 +137,8 @@ ExecNestLoop(NestLoopState *node) node->nl_MatchedOuter = false; /* - * fetch the values of any outer Vars that must be passed to - * the inner scan, and store them in the appropriate PARAM_EXEC - * slots. + * fetch the values of any outer Vars that must be passed to the + * inner scan, and store them in the appropriate PARAM_EXEC slots. */ foreach(lc, nl->nestParams) { @@ -330,9 +329,9 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags) * * If we have no parameters to pass into the inner rel from the outer, * tell the inner child that cheap rescans would be good. If we do have - * such parameters, then there is no point in REWIND support at all in - * the inner child, because it will always be rescanned with fresh - * parameter values. + * such parameters, then there is no point in REWIND support at all in the + * inner child, because it will always be rescanned with fresh parameter + * values. */ outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags); if (node->nestParams == NIL) diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c index 84c051854bb..12e1b9a5857 100644 --- a/src/backend/executor/nodeRecursiveunion.c +++ b/src/backend/executor/nodeRecursiveunion.c @@ -29,7 +29,7 @@ typedef struct RUHashEntryData *RUHashEntry; typedef struct RUHashEntryData { TupleHashEntryData shared; /* common header for hash table entries */ -} RUHashEntryData; +} RUHashEntryData; /* diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index aa352d7822e..9106f148730 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -76,7 +76,7 @@ typedef struct SetOpHashEntryData { TupleHashEntryData shared; /* common header for hash table entries */ SetOpStatePerGroupData pergroup; -} SetOpHashEntryData; +} SetOpHashEntryData; static TupleTableSlot *setop_retrieve_direct(SetOpState *setopstate); diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 5680efeb69e..25d9298cefc 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -92,7 +92,7 @@ typedef struct WindowStatePerFuncData int aggno; /* if so, index of its PerAggData */ WindowObject winobj; /* object used in window function API */ -} WindowStatePerFuncData; +} WindowStatePerFuncData; /* * For plain aggregate window functions, we also have one of these. diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index a717a0deead..6e723ca092b 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -1787,8 +1787,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, * snapshot != InvalidSnapshot, read_only = true: use exactly the given * snapshot. * - * snapshot != InvalidSnapshot, read_only = false: use the given - * snapshot, modified by advancing its command ID before each querytree. + * snapshot != InvalidSnapshot, read_only = false: use the given snapshot, + * modified by advancing its command ID before each querytree. * * snapshot == InvalidSnapshot, read_only = true: use the entry-time * ActiveSnapshot, if any (if there isn't one, we run with no snapshot). 
@@ -1797,8 +1797,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, * snapshot for each user command, and advance its command ID before each * querytree within the command. * - * In the first two cases, we can just push the snap onto the stack - * once for the whole plan list. + * In the first two cases, we can just push the snap onto the stack once + * for the whole plan list. */ if (snapshot != InvalidSnapshot) { @@ -2028,7 +2028,7 @@ _SPI_convert_params(int nargs, Oid *argtypes, /* sizeof(ParamListInfoData) includes the first array element */ paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) + - (nargs - 1) *sizeof(ParamExternData)); + (nargs - 1) * sizeof(ParamExternData)); /* we have static list of params, so no hooks needed */ paramLI->paramFetch = NULL; paramLI->paramFetchArg = NULL; |