Diffstat (limited to 'src/backend')
-rw-r--r--   src/backend/optimizer/path/clausesel.c |  15
-rw-r--r--   src/backend/optimizer/path/costsize.c  |  50
-rw-r--r--   src/backend/optimizer/util/clauses.c   |  27
-rw-r--r--   src/backend/optimizer/util/pathnode.c  |   2
-rw-r--r--   src/backend/optimizer/util/plancat.c   | 182
-rw-r--r--   src/backend/utils/adt/arrayfuncs.c     |  34
-rw-r--r--   src/backend/utils/adt/int.c            |  74
-rw-r--r--   src/backend/utils/adt/int8.c           |  73
-rw-r--r--   src/backend/utils/adt/selfuncs.c       |  13
-rw-r--r--   src/backend/utils/cache/lsyscache.c    |  45
10 files changed, 444 insertions(+), 71 deletions(-)
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index abca03b805e..e8142bddf02 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -762,6 +762,21 @@ clause_selectivity(PlannerInfo *root,
 		if (IsA(clause, DistinctExpr))
 			s1 = 1.0 - s1;
 	}
+	else if (is_funcclause(clause))
+	{
+		FuncExpr   *funcclause = (FuncExpr *) clause;
+
+		/* Try to get an estimate from the support function, if any */
+		s1 = function_selectivity(root,
+								  funcclause->funcid,
+								  funcclause->args,
+								  funcclause->inputcollid,
+								  treat_as_join_clause(clause, rinfo,
+													   varRelid, sjinfo),
+								  varRelid,
+								  jointype,
+								  sjinfo);
+	}
 	else if (IsA(clause, ScalarArrayOpExpr))
 	{
 		/* Use node specific selectivity calculation function */
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 1057ddaa3e5..beee50ec135 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -2112,9 +2112,9 @@ cost_agg(Path *path, PlannerInfo *root,
 	/*
 	 * The transCost.per_tuple component of aggcosts should be charged once
 	 * per input tuple, corresponding to the costs of evaluating the aggregate
-	 * transfns and their input expressions (with any startup cost of course
-	 * charged but once).  The finalCost component is charged once per output
-	 * tuple, corresponding to the costs of evaluating the finalfns.
+	 * transfns and their input expressions.  The finalCost.per_tuple component
+	 * is charged once per output tuple, corresponding to the costs of
+	 * evaluating the finalfns.  Startup costs are of course charged but once.
 	 *
 	 * If we are grouping, we charge an additional cpu_operator_cost per
 	 * grouping column per input tuple for grouping comparisons.
@@ -2136,7 +2136,8 @@ cost_agg(Path *path, PlannerInfo *root,
 		startup_cost = input_total_cost;
 		startup_cost += aggcosts->transCost.startup;
 		startup_cost += aggcosts->transCost.per_tuple * input_tuples;
-		startup_cost += aggcosts->finalCost;
+		startup_cost += aggcosts->finalCost.startup;
+		startup_cost += aggcosts->finalCost.per_tuple;
 		/* we aren't grouping */
 		total_cost = startup_cost + cpu_tuple_cost;
 		output_tuples = 1;
@@ -2155,7 +2156,8 @@
 		total_cost += aggcosts->transCost.startup;
 		total_cost += aggcosts->transCost.per_tuple * input_tuples;
 		total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
-		total_cost += aggcosts->finalCost * numGroups;
+		total_cost += aggcosts->finalCost.startup;
+		total_cost += aggcosts->finalCost.per_tuple * numGroups;
 		total_cost += cpu_tuple_cost * numGroups;
 		output_tuples = numGroups;
 	}
@@ -2168,8 +2170,9 @@
 		startup_cost += aggcosts->transCost.startup;
 		startup_cost += aggcosts->transCost.per_tuple * input_tuples;
 		startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
+		startup_cost += aggcosts->finalCost.startup;
 		total_cost = startup_cost;
-		total_cost += aggcosts->finalCost * numGroups;
+		total_cost += aggcosts->finalCost.per_tuple * numGroups;
 		total_cost += cpu_tuple_cost * numGroups;
 		output_tuples = numGroups;
 	}
@@ -2234,7 +2237,11 @@ cost_windowagg(Path *path, PlannerInfo *root,
 		Cost		wfunccost;
 		QualCost	argcosts;
 
-		wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;
+		argcosts.startup = argcosts.per_tuple = 0;
+		add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
+						  &argcosts);
+		startup_cost += argcosts.startup;
+		wfunccost = argcosts.per_tuple;
 
 		/* also add the input expressions' cost to per-input-row costs */
 		cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
@@ -3864,8 +3871,8 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
 	 */
 	if (IsA(node, FuncExpr))
 	{
-		context->total.per_tuple +=
-			get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
+		add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
+						  &context->total);
 	}
 	else if (IsA(node, OpExpr) ||
 			 IsA(node, DistinctExpr) ||
@@ -3873,8 +3880,8 @@
 	{
 		/* rely on struct equivalence to treat these all alike */
 		set_opfuncid((OpExpr *) node);
-		context->total.per_tuple +=
-			get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
+		add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
+						  &context->total);
 	}
 	else if (IsA(node, ScalarArrayOpExpr))
 	{
@@ -3884,10 +3891,15 @@
 		 */
 		ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
 		Node	   *arraynode = (Node *) lsecond(saop->args);
+		QualCost	sacosts;
 
 		set_sa_opfuncid(saop);
-		context->total.per_tuple += get_func_cost(saop->opfuncid) *
-			cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
+		sacosts.startup = sacosts.per_tuple = 0;
+		add_function_cost(context->root, saop->opfuncid, NULL,
+						  &sacosts);
+		context->total.startup += sacosts.startup;
+		context->total.per_tuple += sacosts.per_tuple *
+			estimate_array_length(arraynode) * 0.5;
 	}
 	else if (IsA(node, Aggref) ||
 			 IsA(node, WindowFunc))
@@ -3913,11 +3925,13 @@
 		/* check the result type's input function */
 		getTypeInputInfo(iocoerce->resulttype,
 						 &iofunc, &typioparam);
-		context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
+		add_function_cost(context->root, iofunc, NULL,
+						  &context->total);
 		/* check the input type's output function */
 		getTypeOutputInfo(exprType((Node *) iocoerce->arg),
 						  &iofunc, &typisvarlena);
-		context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
+		add_function_cost(context->root, iofunc, NULL,
+						  &context->total);
 	}
 	else if (IsA(node, ArrayCoerceExpr))
 	{
@@ -3941,8 +3955,8 @@
 		{
 			Oid			opid = lfirst_oid(lc);
 
-			context->total.per_tuple += get_func_cost(get_opcode(opid)) *
-				cpu_operator_cost;
+			add_function_cost(context->root, get_opcode(opid), NULL,
+							  &context->total);
 		}
 	}
 	else if (IsA(node, MinMaxExpr) ||
@@ -4941,7 +4955,7 @@ set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
 	foreach(lc, rte->functions)
 	{
 		RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
-		double		ntup = expression_returns_set_rows(rtfunc->funcexpr);
+		double		ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
 
 		if (ntup > rel->tuples)
 			rel->tuples = ntup;
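Every call site changed in costsize.c above now funnels through add_function_cost() (added in plancat.c further down in this diff), which hands a SupportRequestCost node to the function's planner support function whenever pg_proc.prosupport is set, and otherwise falls back to the flat procost * cpu_operator_cost charge. As a hedged sketch of the receiving side of that protocol, a support function for a hypothetical expensive function might answer the request roughly as follows; the function name and the cost numbers are invented for illustration and are not part of this patch:

    #include "postgres.h"

    #include "fmgr.h"
    #include "nodes/supportnodes.h"
    #include "optimizer/cost.h"     /* cpu_operator_cost */

    PG_FUNCTION_INFO_V1(my_expensive_func_support);

    /*
     * Hypothetical planner support function: charge a one-time setup cost
     * plus a high per-row cost for "my_expensive_func", instead of the
     * default procost * cpu_operator_cost.
     */
    Datum
    my_expensive_func_support(PG_FUNCTION_ARGS)
    {
        Node       *rawreq = (Node *) PG_GETARG_POINTER(0);
        Node       *ret = NULL;

        if (IsA(rawreq, SupportRequestCost))
        {
            SupportRequestCost *req = (SupportRequestCost *) rawreq;

            /* illustrative numbers only */
            req->startup = 100.0 * cpu_operator_cost;
            req->per_tuple = 1000.0 * cpu_operator_cost;
            ret = (Node *) req;
        }

        /* returning NULL makes add_function_cost() fall back to procost */
        PG_RETURN_POINTER(ret);
    }

Returning the request pointer is what signals success; it is the same convention add_function_cost() checks with "sresult == &req".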
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 002c29a5f5d..86e4753a5b3 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -36,8 +36,8 @@
 #include "optimizer/clauses.h"
 #include "optimizer/cost.h"
 #include "optimizer/optimizer.h"
+#include "optimizer/plancat.h"
 #include "optimizer/planmain.h"
-#include "optimizer/prep.h"
 #include "parser/analyze.h"
 #include "parser/parse_agg.h"
 #include "parser/parse_coerce.h"
@@ -343,19 +343,24 @@ get_agg_clause_costs_walker(Node *node, get_agg_clause_costs_context *context)
 		if (DO_AGGSPLIT_COMBINE(context->aggsplit))
 		{
 			/* charge for combining previously aggregated states */
-			costs->transCost.per_tuple += get_func_cost(aggcombinefn) * cpu_operator_cost;
+			add_function_cost(context->root, aggcombinefn, NULL,
+							  &costs->transCost);
 		}
 		else
-			costs->transCost.per_tuple += get_func_cost(aggtransfn) * cpu_operator_cost;
+			add_function_cost(context->root, aggtransfn, NULL,
+							  &costs->transCost);
 		if (DO_AGGSPLIT_DESERIALIZE(context->aggsplit) &&
 			OidIsValid(aggdeserialfn))
-			costs->transCost.per_tuple += get_func_cost(aggdeserialfn) * cpu_operator_cost;
+			add_function_cost(context->root, aggdeserialfn, NULL,
+							  &costs->transCost);
 		if (DO_AGGSPLIT_SERIALIZE(context->aggsplit) &&
 			OidIsValid(aggserialfn))
-			costs->finalCost += get_func_cost(aggserialfn) * cpu_operator_cost;
+			add_function_cost(context->root, aggserialfn, NULL,
+							  &costs->finalCost);
 		if (!DO_AGGSPLIT_SKIPFINAL(context->aggsplit) &&
 			OidIsValid(aggfinalfn))
-			costs->finalCost += get_func_cost(aggfinalfn) * cpu_operator_cost;
+			add_function_cost(context->root, aggfinalfn, NULL,
+							  &costs->finalCost);
 
 		/*
 		 * These costs are incurred only by the initial aggregate node, so we
@@ -392,8 +397,8 @@ get_agg_clause_costs_walker(Node *node, get_agg_clause_costs_context *context)
 		{
 			cost_qual_eval_node(&argcosts, (Node *) aggref->aggdirectargs,
 								context->root);
-			costs->transCost.startup += argcosts.startup;
-			costs->finalCost += argcosts.per_tuple;
+			costs->finalCost.startup += argcosts.startup;
+			costs->finalCost.per_tuple += argcosts.per_tuple;
 		}
 
 		/*
@@ -561,7 +566,7 @@ find_window_functions_walker(Node *node, WindowFuncLists *lists)
 * Note: keep this in sync with expression_returns_set() in nodes/nodeFuncs.c.
 */
 double
-expression_returns_set_rows(Node *clause)
+expression_returns_set_rows(PlannerInfo *root, Node *clause)
 {
 	if (clause == NULL)
 		return 1.0;
@@ -570,7 +575,7 @@
 		FuncExpr   *expr = (FuncExpr *) clause;
 
 		if (expr->funcretset)
-			return clamp_row_est(get_func_rows(expr->funcid));
+			return clamp_row_est(get_function_rows(root, expr->funcid, clause));
 	}
 	if (IsA(clause, OpExpr))
 	{
@@ -579,7 +584,7 @@
 		if (expr->opretset)
 		{
 			set_opfuncid(expr);
-			return clamp_row_est(get_func_rows(expr->opfuncid));
+			return clamp_row_est(get_function_rows(root, expr->opfuncid, clause));
 		}
 	}
 	return 1.0;
 }
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index a3e64110d36..169e51e7921 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -2711,7 +2711,7 @@ create_set_projection_path(PlannerInfo *root,
 		Node	   *node = (Node *) lfirst(lc);
 		double		itemrows;
 
-		itemrows = expression_returns_set_rows(node);
+		itemrows = expression_returns_set_rows(root, node);
 		if (tlist_rows < itemrows)
 			tlist_rows = itemrows;
 	}
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 3efa1bdc1a4..d6dc83ca809 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -29,10 +29,12 @@
 #include "catalog/heap.h"
 #include "catalog/partition.h"
 #include "catalog/pg_am.h"
+#include "catalog/pg_proc.h"
 #include "catalog/pg_statistic_ext.h"
 #include "foreign/fdwapi.h"
 #include "miscadmin.h"
 #include "nodes/makefuncs.h"
+#include "nodes/supportnodes.h"
 #include "optimizer/clauses.h"
 #include "optimizer/cost.h"
 #include "optimizer/optimizer.h"
@@ -1772,6 +1774,8 @@ restriction_selectivity(PlannerInfo *root,
 * Returns the selectivity of a specified join operator clause.
 * This code executes registered procedures stored in the
 * operator relation, by calling the function manager.
+ *
+ * See clause_selectivity() for the meaning of the additional parameters.
 */
 Selectivity
 join_selectivity(PlannerInfo *root,
@@ -1806,6 +1810,184 @@ join_selectivity(PlannerInfo *root,
 }
 
 /*
+ * function_selectivity
+ *
+ * Returns the selectivity of a specified boolean function clause.
+ * This code executes registered procedures stored in the
+ * pg_proc relation, by calling the function manager.
+ *
+ * See clause_selectivity() for the meaning of the additional parameters.
+ */
+Selectivity
+function_selectivity(PlannerInfo *root,
+					 Oid funcid,
+					 List *args,
+					 Oid inputcollid,
+					 bool is_join,
+					 int varRelid,
+					 JoinType jointype,
+					 SpecialJoinInfo *sjinfo)
+{
+	RegProcedure prosupport = get_func_support(funcid);
+	SupportRequestSelectivity req;
+	SupportRequestSelectivity *sresult;
+
+	/*
+	 * If no support function is provided, use our historical default
+	 * estimate, 0.3333333.  This seems a pretty unprincipled choice, but
+	 * Postgres has been using that estimate for function calls since 1992.
+	 * The hoariness of this behavior suggests that we should not be in too
+	 * much hurry to use another value.
+	 */
+	if (!prosupport)
+		return (Selectivity) 0.3333333;
+
+	req.type = T_SupportRequestSelectivity;
+	req.root = root;
+	req.funcid = funcid;
+	req.args = args;
+	req.inputcollid = inputcollid;
+	req.is_join = is_join;
+	req.varRelid = varRelid;
+	req.jointype = jointype;
+	req.sjinfo = sjinfo;
+	req.selectivity = -1;		/* to catch failure to set the value */
+
+	sresult = (SupportRequestSelectivity *)
+		DatumGetPointer(OidFunctionCall1(prosupport,
+										 PointerGetDatum(&req)));
+
+	/* If support function fails, use default */
+	if (sresult != &req)
+		return (Selectivity) 0.3333333;
+
+	if (req.selectivity < 0.0 || req.selectivity > 1.0)
+		elog(ERROR, "invalid function selectivity: %f", req.selectivity);
+
+	return (Selectivity) req.selectivity;
+}
+
+/*
+ * add_function_cost
+ *
+ * Get an estimate of the execution cost of a function, and *add* it to
+ * the contents of *cost.  The estimate may include both one-time and
+ * per-tuple components, since QualCost does.
+ *
+ * The funcid must always be supplied.  If it is being called as the
+ * implementation of a specific parsetree node (FuncExpr, OpExpr,
+ * WindowFunc, etc), pass that as "node", else pass NULL.
+ *
+ * In some usages root might be NULL, too.
+ */
+void
+add_function_cost(PlannerInfo *root, Oid funcid, Node *node,
+				  QualCost *cost)
+{
+	HeapTuple	proctup;
+	Form_pg_proc procform;
+
+	proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+	if (!HeapTupleIsValid(proctup))
+		elog(ERROR, "cache lookup failed for function %u", funcid);
+	procform = (Form_pg_proc) GETSTRUCT(proctup);
+
+	if (OidIsValid(procform->prosupport))
+	{
+		SupportRequestCost req;
+		SupportRequestCost *sresult;
+
+		req.type = T_SupportRequestCost;
+		req.root = root;
+		req.funcid = funcid;
+		req.node = node;
+
+		/* Initialize cost fields so that support function doesn't have to */
+		req.startup = 0;
+		req.per_tuple = 0;
+
+		sresult = (SupportRequestCost *)
+			DatumGetPointer(OidFunctionCall1(procform->prosupport,
+											 PointerGetDatum(&req)));
+
+		if (sresult == &req)
+		{
+			/* Success, so accumulate support function's estimate into *cost */
+			cost->startup += req.startup;
+			cost->per_tuple += req.per_tuple;
+			ReleaseSysCache(proctup);
+			return;
+		}
+	}
+
+	/* No support function, or it failed, so rely on procost */
+	cost->per_tuple += procform->procost * cpu_operator_cost;
+
+	ReleaseSysCache(proctup);
+}
+
+/*
+ * get_function_rows
+ *
+ * Get an estimate of the number of rows returned by a set-returning function.
+ *
+ * The funcid must always be supplied.  In current usage, the calling node
+ * will always be supplied, and will be either a FuncExpr or OpExpr.
+ * But it's a good idea to not fail if it's NULL.
+ *
+ * In some usages root might be NULL, too.
+ *
+ * Note: this returns the unfiltered result of the support function, if any.
+ * It's usually a good idea to apply clamp_row_est() to the result, but we
+ * leave it to the caller to do so.
+ */
+double
+get_function_rows(PlannerInfo *root, Oid funcid, Node *node)
+{
+	HeapTuple	proctup;
+	Form_pg_proc procform;
+	double		result;
+
+	proctup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+	if (!HeapTupleIsValid(proctup))
+		elog(ERROR, "cache lookup failed for function %u", funcid);
+	procform = (Form_pg_proc) GETSTRUCT(proctup);
+
+	Assert(procform->proretset);	/* else caller error */
+
+	if (OidIsValid(procform->prosupport))
+	{
+		SupportRequestRows req;
+		SupportRequestRows *sresult;
+
+		req.type = T_SupportRequestRows;
+		req.root = root;
+		req.funcid = funcid;
+		req.node = node;
+
+		req.rows = 0;			/* just for sanity */
+
+		sresult = (SupportRequestRows *)
+			DatumGetPointer(OidFunctionCall1(procform->prosupport,
+											 PointerGetDatum(&req)));
+
+		if (sresult == &req)
+		{
+			/* Success */
+			ReleaseSysCache(proctup);
+			return req.rows;
+		}
+	}
+
+	/* No support function, or it failed, so rely on prorows */
+	result = procform->prorows;
+
+	ReleaseSysCache(proctup);
+
+	return result;
+}
+
+/*
  * has_unique_index
  *
  * Detect whether there is a unique index on the specified attribute
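function_selectivity() above defines the matching protocol for selectivity estimates: it fills in a SupportRequestSelectivity, calls the pg_proc.prosupport function, and accepts the answer only if that function returns the request pointer with selectivity set to a value in [0, 1]; otherwise the historical 0.3333333 default applies. A minimal hedged sketch of a support function answering that request for a hypothetical boolean filter function follows; the names and the 1% figure are invented for illustration:

    #include "postgres.h"

    #include "fmgr.h"
    #include "nodes/supportnodes.h"

    PG_FUNCTION_INFO_V1(my_filter_support);

    /*
     * Hypothetical planner support function: report that "my_filter(...)"
     * passes roughly 1% of its input rows.
     */
    Datum
    my_filter_support(PG_FUNCTION_ARGS)
    {
        Node       *rawreq = (Node *) PG_GETARG_POINTER(0);
        Node       *ret = NULL;

        if (IsA(rawreq, SupportRequestSelectivity))
        {
            SupportRequestSelectivity *req = (SupportRequestSelectivity *) rawreq;

            /* must lie in [0, 1], or function_selectivity() raises an error */
            req->selectivity = 0.01;
            ret = (Node *) req;
        }

        /* NULL means "no estimate"; the caller keeps its default */
        PG_RETURN_POINTER(ret);
    }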
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index a785361fd07..5b2917d1594 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -22,12 +22,16 @@
 #include "catalog/pg_type.h"
 #include "funcapi.h"
 #include "libpq/pqformat.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/supportnodes.h"
+#include "optimizer/optimizer.h"
 #include "utils/array.h"
 #include "utils/arrayaccess.h"
 #include "utils/builtins.h"
 #include "utils/datum.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
+#include "utils/selfuncs.h"
 #include "utils/typcache.h"
 
 
@@ -6025,6 +6029,36 @@ array_unnest(PG_FUNCTION_ARGS)
 	}
 }
 
+/*
+ * Planner support function for array_unnest(anyarray)
+ */
+Datum
+array_unnest_support(PG_FUNCTION_ARGS)
+{
+	Node	   *rawreq = (Node *) PG_GETARG_POINTER(0);
+	Node	   *ret = NULL;
+
+	if (IsA(rawreq, SupportRequestRows))
+	{
+		/* Try to estimate the number of rows returned */
+		SupportRequestRows *req = (SupportRequestRows *) rawreq;
+
+		if (is_funcclause(req->node))	/* be paranoid */
+		{
+			List	   *args = ((FuncExpr *) req->node)->args;
+			Node	   *arg1;
+
+			/* We can use estimated argument values here */
+			arg1 = estimate_expression_value(req->root, linitial(args));
+
+			req->rows = estimate_array_length(arg1);
+			ret = (Node *) req;
+		}
+	}
+
+	PG_RETURN_POINTER(ret);
+}
+
 /*
  * array_replace/array_remove support
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index ad8e6d02ee4..04825fc77de 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -30,11 +30,15 @@
 #include <ctype.h>
 #include <limits.h>
+#include <math.h>
 
 #include "catalog/pg_type.h"
 #include "common/int.h"
 #include "funcapi.h"
 #include "libpq/pqformat.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/supportnodes.h"
+#include "optimizer/optimizer.h"
 #include "utils/array.h"
 #include "utils/builtins.h"
 
@@ -1427,3 +1431,73 @@ generate_series_step_int4(PG_FUNCTION_ARGS)
 	/* do when there is no more left */
 	SRF_RETURN_DONE(funcctx);
 }
+
+/*
+ * Planner support function for generate_series(int4, int4 [, int4])
+ */
+Datum
+generate_series_int4_support(PG_FUNCTION_ARGS)
+{
+	Node	   *rawreq = (Node *) PG_GETARG_POINTER(0);
+	Node	   *ret = NULL;
+
+	if (IsA(rawreq, SupportRequestRows))
+	{
+		/* Try to estimate the number of rows returned */
+		SupportRequestRows *req = (SupportRequestRows *) rawreq;
+
+		if (is_funcclause(req->node))	/* be paranoid */
+		{
+			List	   *args = ((FuncExpr *) req->node)->args;
+			Node	   *arg1,
+					   *arg2,
+					   *arg3;
+
+			/* We can use estimated argument values here */
+			arg1 = estimate_expression_value(req->root, linitial(args));
+			arg2 = estimate_expression_value(req->root, lsecond(args));
+			if (list_length(args) >= 3)
+				arg3 = estimate_expression_value(req->root, lthird(args));
+			else
+				arg3 = NULL;
+
+			/*
+			 * If any argument is constant NULL, we can safely assume that
+			 * zero rows are returned.  Otherwise, if they're all non-NULL
+			 * constants, we can calculate the number of rows that will be
+			 * returned.  Use double arithmetic to avoid overflow hazards.
+			 */
+			if ((IsA(arg1, Const) &&
+				 ((Const *) arg1)->constisnull) ||
+				(IsA(arg2, Const) &&
+				 ((Const *) arg2)->constisnull) ||
+				(arg3 != NULL && IsA(arg3, Const) &&
+				 ((Const *) arg3)->constisnull))
+			{
+				req->rows = 0;
+				ret = (Node *) req;
+			}
+			else if (IsA(arg1, Const) &&
+					 IsA(arg2, Const) &&
+					 (arg3 == NULL || IsA(arg3, Const)))
+			{
+				double		start,
+							finish,
+							step;
+
+				start = DatumGetInt32(((Const *) arg1)->constvalue);
+				finish = DatumGetInt32(((Const *) arg2)->constvalue);
+				step = arg3 ? DatumGetInt32(((Const *) arg3)->constvalue) : 1;
+
+				/* This equation works for either sign of step */
+				if (step != 0)
+				{
+					req->rows = floor((finish - start + step) / step);
+					ret = (Node *) req;
+				}
+			}
+		}
+	}
+
+	PG_RETURN_POINTER(ret);
+}
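The estimate computed by generate_series_int4_support() is exact whenever all arguments are constants, since the series length is floor((finish - start + step) / step). A quick worked check with invented constant arguments (the int8 variant below reuses the same formula):

    generate_series(1, 10, 3)   emits 1, 4, 7, 10
        rows = floor((10 - 1 + 3) / 3)       = floor(4.0) = 4

    generate_series(10, 1, -3)  emits 10, 7, 4, 1
        rows = floor((1 - 10 + (-3)) / (-3)) = floor(4.0) = 4

A backwards range such as generate_series(5, 1, 1) produces a nonpositive value here; callers such as expression_returns_set_rows() run the result through clamp_row_est().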
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index d16cc9e574b..0ff9394a2fb 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -20,6 +20,9 @@
 #include "common/int.h"
 #include "funcapi.h"
 #include "libpq/pqformat.h"
+#include "nodes/nodeFuncs.h"
+#include "nodes/supportnodes.h"
+#include "optimizer/optimizer.h"
 #include "utils/int8.h"
 #include "utils/builtins.h"
 
@@ -1373,3 +1376,73 @@ generate_series_step_int8(PG_FUNCTION_ARGS)
 	/* do when there is no more left */
 	SRF_RETURN_DONE(funcctx);
 }
+
+/*
+ * Planner support function for generate_series(int8, int8 [, int8])
+ */
+Datum
+generate_series_int8_support(PG_FUNCTION_ARGS)
+{
+	Node	   *rawreq = (Node *) PG_GETARG_POINTER(0);
+	Node	   *ret = NULL;
+
+	if (IsA(rawreq, SupportRequestRows))
+	{
+		/* Try to estimate the number of rows returned */
+		SupportRequestRows *req = (SupportRequestRows *) rawreq;
+
+		if (is_funcclause(req->node))	/* be paranoid */
+		{
+			List	   *args = ((FuncExpr *) req->node)->args;
+			Node	   *arg1,
+					   *arg2,
+					   *arg3;
+
+			/* We can use estimated argument values here */
+			arg1 = estimate_expression_value(req->root, linitial(args));
+			arg2 = estimate_expression_value(req->root, lsecond(args));
+			if (list_length(args) >= 3)
+				arg3 = estimate_expression_value(req->root, lthird(args));
+			else
+				arg3 = NULL;
+
+			/*
+			 * If any argument is constant NULL, we can safely assume that
+			 * zero rows are returned.  Otherwise, if they're all non-NULL
+			 * constants, we can calculate the number of rows that will be
+			 * returned.  Use double arithmetic to avoid overflow hazards.
+			 */
+			if ((IsA(arg1, Const) &&
+				 ((Const *) arg1)->constisnull) ||
+				(IsA(arg2, Const) &&
+				 ((Const *) arg2)->constisnull) ||
+				(arg3 != NULL && IsA(arg3, Const) &&
+				 ((Const *) arg3)->constisnull))
+			{
+				req->rows = 0;
+				ret = (Node *) req;
+			}
+			else if (IsA(arg1, Const) &&
+					 IsA(arg2, Const) &&
+					 (arg3 == NULL || IsA(arg3, Const)))
+			{
+				double		start,
+							finish,
+							step;
+
+				start = DatumGetInt64(((Const *) arg1)->constvalue);
+				finish = DatumGetInt64(((Const *) arg2)->constvalue);
+				step = arg3 ? DatumGetInt64(((Const *) arg3)->constvalue) : 1;
+
+				/* This equation works for either sign of step */
+				if (step != 0)
+				{
+					req->rows = floor((finish - start + step) / step);
+					ret = (Node *) req;
+				}
+			}
+		}
+	}
+
+	PG_RETURN_POINTER(ret);
+}
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 74fafc64f3e..1ef6faecd1e 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -1577,17 +1577,6 @@ boolvarsel(PlannerInfo *root, Node *arg, int varRelid)
 		selec = var_eq_const(&vardata, BooleanEqualOperator,
 							 BoolGetDatum(true), false, true, false);
 	}
-	else if (is_funcclause(arg))
-	{
-		/*
-		 * If we have no stats and it's a function call, estimate 0.3333333.
-		 * This seems a pretty unprincipled choice, but Postgres has been
-		 * using that estimate for function calls since 1992.  The hoariness
-		 * of this behavior suggests that we should not be in too much hurry
-		 * to use another value.
-		 */
-		selec = 0.3333333;
-	}
 	else
 	{
 		/* Otherwise, the default estimate is 0.5 */
@@ -3502,7 +3491,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
 			 * pointless to worry too much about this without much better
 			 * estimates for SRF output rowcounts than we have today.)
 			 */
-			this_srf_multiplier = expression_returns_set_rows(groupexpr);
+			this_srf_multiplier = expression_returns_set_rows(root, groupexpr);
 			if (srf_multiplier < this_srf_multiplier)
 				srf_multiplier = this_srf_multiplier;
 
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index fba0ee8b847..e88c45d268a 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -1605,41 +1605,28 @@ get_func_leakproof(Oid funcid)
 }
 
 /*
- * get_func_cost
- *		Given procedure id, return the function's procost field.
- */
-float4
-get_func_cost(Oid funcid)
-{
-	HeapTuple	tp;
-	float4		result;
-
-	tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
-	if (!HeapTupleIsValid(tp))
-		elog(ERROR, "cache lookup failed for function %u", funcid);
-
-	result = ((Form_pg_proc) GETSTRUCT(tp))->procost;
-	ReleaseSysCache(tp);
-	return result;
-}
-
-/*
- * get_func_rows
- *		Given procedure id, return the function's prorows field.
+ * get_func_support
+ *
+ *		Returns the support function OID associated with a given function,
+ *		or InvalidOid if there is none.
  */
-float4
-get_func_rows(Oid funcid)
+RegProcedure
+get_func_support(Oid funcid)
 {
 	HeapTuple	tp;
-	float4		result;
 
 	tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
-	if (!HeapTupleIsValid(tp))
-		elog(ERROR, "cache lookup failed for function %u", funcid);
 
-	result = ((Form_pg_proc) GETSTRUCT(tp))->prorows;
-	ReleaseSysCache(tp);
-	return result;
+	if (HeapTupleIsValid(tp))
+	{
+		Form_pg_proc functup = (Form_pg_proc) GETSTRUCT(tp);
+		RegProcedure result;
+
+		result = functup->prosupport;
+		ReleaseSysCache(tp);
+		return result;
+	}
+	else
+		return (RegProcedure) InvalidOid;
 }
 
 /*				---------- RELATION CACHE ----------					 */