author     Bruce Momjian    2012-06-10 19:20:04 +0000
committer  Bruce Momjian    2012-06-10 19:20:04 +0000
commit     927d61eeff78363ea3938c818d07e511ebaf75cf (patch)
tree       2f0bcecf53327f76272a8ce690fa62505520fab9 /src/backend/utils
parent     60801944fa105252b48ea5688d47dfc05c695042 (diff)
Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.
Diffstat (limited to 'src/backend/utils')
-rw-r--r--  src/backend/utils/adt/acl.c | 6
-rw-r--r--  src/backend/utils/adt/array_selfuncs.c | 42
-rw-r--r--  src/backend/utils/adt/array_typanalyze.c | 42
-rw-r--r--  src/backend/utils/adt/cash.c | 4
-rw-r--r--  src/backend/utils/adt/date.c | 2
-rw-r--r--  src/backend/utils/adt/datetime.c | 2
-rw-r--r--  src/backend/utils/adt/dbsize.c | 2
-rw-r--r--  src/backend/utils/adt/float.c | 26
-rw-r--r--  src/backend/utils/adt/formatting.c | 6
-rw-r--r--  src/backend/utils/adt/inet_net_pton.c | 3
-rw-r--r--  src/backend/utils/adt/json.c | 229
-rw-r--r--  src/backend/utils/adt/lockfuncs.c | 6
-rw-r--r--  src/backend/utils/adt/mac.c | 16
-rw-r--r--  src/backend/utils/adt/misc.c | 19
-rw-r--r--  src/backend/utils/adt/numeric.c | 8
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 6
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c | 8
-rw-r--r--  src/backend/utils/adt/rangetypes.c | 54
-rw-r--r--  src/backend/utils/adt/rangetypes_gist.c | 231
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 92
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 91
-rw-r--r--  src/backend/utils/adt/timestamp.c | 18
-rw-r--r--  src/backend/utils/adt/tsgistidx.c | 4
-rw-r--r--  src/backend/utils/adt/tsquery_util.c | 2
-rw-r--r--  src/backend/utils/adt/tsrank.c | 4
-rw-r--r--  src/backend/utils/adt/tsvector_op.c | 6
-rw-r--r--  src/backend/utils/adt/varbit.c | 2
-rw-r--r--  src/backend/utils/adt/varchar.c | 2
-rw-r--r--  src/backend/utils/adt/varlena.c | 11
-rw-r--r--  src/backend/utils/adt/xml.c | 36
-rw-r--r--  src/backend/utils/cache/catcache.c | 4
-rw-r--r--  src/backend/utils/cache/inval.c | 2
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 8
-rw-r--r--  src/backend/utils/cache/plancache.c | 82
-rw-r--r--  src/backend/utils/cache/relcache.c | 12
-rw-r--r--  src/backend/utils/cache/ts_cache.c | 8
-rw-r--r--  src/backend/utils/error/elog.c | 12
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 4
-rw-r--r--  src/backend/utils/fmgr/funcapi.c | 16
-rw-r--r--  src/backend/utils/init/miscinit.c | 2
-rw-r--r--  src/backend/utils/mb/wchar.c | 23
-rw-r--r--  src/backend/utils/misc/guc.c | 24
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 2
-rw-r--r--  src/backend/utils/sort/sortsupport.c | 5
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 31
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 2
-rw-r--r--  src/backend/utils/time/snapmgr.c | 82
-rw-r--r--  src/backend/utils/time/tqual.c | 10
48 files changed, 666 insertions, 643 deletions
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index a8bf2bfffb2..77322a115f1 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -835,15 +835,15 @@ acldefault(GrantObjectType objtype, Oid ownerId)
/*
- * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
+ * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
* ACL_OBJECT_* values, but it's only used in the information schema, not
* documented for general use.
*/
Datum
acldefault_sql(PG_FUNCTION_ARGS)
{
- char objtypec = PG_GETARG_CHAR(0);
- Oid owner = PG_GETARG_OID(1);
+ char objtypec = PG_GETARG_CHAR(0);
+ Oid owner = PG_GETARG_OID(1);
GrantObjectType objtype = 0;
switch (objtypec)
diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c
index bc4ebd20749..1b7d46f8586 100644
--- a/src/backend/utils/adt/array_selfuncs.c
+++ b/src/backend/utils/adt/array_selfuncs.c
@@ -172,7 +172,7 @@ scalararraysel_containment(PlannerInfo *root,
selec = mcelem_array_contain_overlap_selec(values, nvalues,
numbers, nnumbers,
&constval, 1,
- OID_ARRAY_CONTAINS_OP,
+ OID_ARRAY_CONTAINS_OP,
cmpfunc);
else
selec = mcelem_array_contained_selec(values, nvalues,
@@ -193,7 +193,7 @@ scalararraysel_containment(PlannerInfo *root,
selec = mcelem_array_contain_overlap_selec(NULL, 0,
NULL, 0,
&constval, 1,
- OID_ARRAY_CONTAINS_OP,
+ OID_ARRAY_CONTAINS_OP,
cmpfunc);
else
selec = mcelem_array_contained_selec(NULL, 0,
@@ -285,8 +285,8 @@ arraycontsel(PG_FUNCTION_ARGS)
}
/*
- * If var is on the right, commute the operator, so that we can assume
- * the var is on the left in what follows.
+ * If var is on the right, commute the operator, so that we can assume the
+ * var is on the left in what follows.
*/
if (!varonleft)
{
@@ -451,7 +451,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry,
float4 *hist, int nhist,
Oid operator, FmgrInfo *cmpfunc)
{
- Selectivity selec;
+ Selectivity selec;
int num_elems;
Datum *elem_values;
bool *elem_nulls;
@@ -500,7 +500,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry,
if (operator == OID_ARRAY_CONTAINS_OP || operator == OID_ARRAY_OVERLAP_OP)
selec = mcelem_array_contain_overlap_selec(mcelem, nmcelem,
numbers, nnumbers,
- elem_values, nonnull_nitems,
+ elem_values, nonnull_nitems,
operator, cmpfunc);
else if (operator == OID_ARRAY_CONTAINED_OP)
selec = mcelem_array_contained_selec(mcelem, nmcelem,
@@ -626,7 +626,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem,
else
{
if (cmp == 0)
- match = true; /* mcelem is found */
+ match = true; /* mcelem is found */
break;
}
}
@@ -687,7 +687,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem,
* In the "column @> const" and "column && const" cases, we usually have a
* "const" with low number of elements (otherwise we have selectivity close
* to 0 or 1 respectively). That's why the effect of dependence related
- * to distinct element count distribution is negligible there. In the
+ * to distinct element count distribution is negligible there. In the
* "column <@ const" case, number of elements is usually high (otherwise we
* have selectivity close to 0). That's why we should do a correction with
* the array distinct element count distribution here.
@@ -806,7 +806,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
else
{
if (cmp == 0)
- match = true; /* mcelem is found */
+ match = true; /* mcelem is found */
break;
}
}
@@ -854,7 +854,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*----------
* Using the distinct element count histogram requires
* O(unique_nitems * (nmcelem + unique_nitems))
- * operations. Beyond a certain computational cost threshold, it's
+ * operations. Beyond a certain computational cost threshold, it's
* reasonable to sacrifice accuracy for decreased planning time. We limit
* the number of operations to EFFORT * nmcelem; since nmcelem is limited
* by the column's statistics target, the work done is user-controllable.
@@ -866,7 +866,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
* elements to start with, we'd have to remove any discarded elements'
* frequencies from "mult", but since this is only an approximation
* anyway, we don't bother with that. Therefore it's sufficient to qsort
- * elem_selec[] and take the largest elements. (They will no longer match
+ * elem_selec[] and take the largest elements. (They will no longer match
* up with the elements of array_data[], but we don't care.)
*----------
*/
@@ -876,11 +876,11 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
unique_nitems > EFFORT * nmcelem / (nmcelem + unique_nitems))
{
/*
- * Use the quadratic formula to solve for largest allowable N. We
+ * Use the quadratic formula to solve for largest allowable N. We
* have A = 1, B = nmcelem, C = - EFFORT * nmcelem.
*/
- double b = (double) nmcelem;
- int n;
+ double b = (double) nmcelem;
+ int n;
n = (int) ((sqrt(b * b + 4 * EFFORT * b) - b) / 2);
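As a sketch of the arithmetic behind this hunk (an editorial note, not part of the commit): capping the pairwise work at EFFORT * nmcelem means taking the largest N with N * (nmcelem + N) <= EFFORT * nmcelem. With b = nmcelem, the positive root of the corresponding quadratic is exactly the expression computed above:

    N^2 + b\,N - \mathrm{EFFORT}\,b = 0
    \quad\Longrightarrow\quad
    N = \frac{\sqrt{b^2 + 4\,\mathrm{EFFORT}\,b} - b}{2}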
@@ -891,9 +891,9 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
}
/*
- * Calculate probabilities of each distinct element count for both
- * mcelems and constant elements. At this point, assume independent
- * element occurrence.
+ * Calculate probabilities of each distinct element count for both mcelems
+ * and constant elements. At this point, assume independent element
+ * occurrence.
*/
dist = calc_distr(elem_selec, unique_nitems, unique_nitems, 0.0f);
mcelem_dist = calc_distr(numbers, nmcelem, unique_nitems, rest);
@@ -906,8 +906,8 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
{
/*
* mult * dist[i] / mcelem_dist[i] gives us probability of qual
- * matching from assumption of independent element occurrence with
- * the condition that distinct element count = i.
+ * matching from assumption of independent element occurrence with the
+ * condition that distinct element count = i.
*/
if (mcelem_dist[i] > 0)
selec += hist_part[i] * mult * dist[i] / mcelem_dist[i];
@@ -951,7 +951,7 @@ calc_hist(const float4 *hist, int nhist, int n)
/*
* frac is a probability contribution for each interval between histogram
- * values. We have nhist - 1 intervals, so contribution of each one will
+ * values. We have nhist - 1 intervals, so contribution of each one will
* be 1 / (nhist - 1).
*/
frac = 1.0f / ((float) (nhist - 1));
@@ -1018,7 +1018,7 @@ calc_hist(const float4 *hist, int nhist, int n)
* "rest" is the sum of the probabilities of all low-probability events not
* included in p.
*
- * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
+ * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
* probability that exactly j of first i events occur. Obviously M[0,0] = 1.
* For any constant j, each increment of i increases the probability iff the
* event occurs. So, by the law of total probability:
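The recurrence this comment leads into falls just outside the quoted diff context; as a sketch, the law-of-total-probability step it describes is

    M[i,j] = M[i-1,\,j]\,(1 - p_i) + M[i-1,\,j-1]\,p_i,
    \qquad M[0,0] = 1,\quad M[0,j] = 0 \ \text{for}\ j > 0.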
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index fa79d9fa6b4..604b86ca644 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -42,9 +42,9 @@ typedef struct
char typalign;
/*
- * Lookup data for element type's comparison and hash functions (these
- * are in the type's typcache entry, which we expect to remain valid
- * over the lifespan of the ANALYZE run)
+ * Lookup data for element type's comparison and hash functions (these are
+ * in the type's typcache entry, which we expect to remain valid over the
+ * lifespan of the ANALYZE run)
*/
FmgrInfo *cmp;
FmgrInfo *hash;
@@ -149,8 +149,8 @@ array_typanalyze(PG_FUNCTION_ARGS)
stats->extra_data = extra_data;
/*
- * Note we leave stats->minrows set as std_typanalyze set it. Should
- * it be increased for array analysis purposes?
+ * Note we leave stats->minrows set as std_typanalyze set it. Should it
+ * be increased for array analysis purposes?
*/
PG_RETURN_BOOL(true);
@@ -160,13 +160,13 @@ array_typanalyze(PG_FUNCTION_ARGS)
* compute_array_stats() -- compute statistics for a array column
*
* This function computes statistics useful for determining selectivity of
- * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
+ * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
* compute_stats hook after sample rows have been collected.
*
* We also invoke the standard compute_stats function, which will compute
* "scalar" statistics relevant to the btree-style array comparison operators.
* However, exact duplicates of an entire array may be rare despite many
- * arrays sharing individual elements. This especially afflicts long arrays,
+ * arrays sharing individual elements. This especially afflicts long arrays,
* which are also liable to lack all scalar statistics due to the low
* WIDTH_THRESHOLD used in analyze.c. So, in addition to the standard stats,
* we find the most common array elements and compute a histogram of distinct
@@ -201,7 +201,7 @@ array_typanalyze(PG_FUNCTION_ARGS)
* In the absence of a principled basis for other particular values, we
* follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
* But we leave out the correction for stopwords, which do not apply to
- * arrays. These parameters give bucket width w = K/0.007 and maximum
+ * arrays. These parameters give bucket width w = K/0.007 and maximum
* expected hashtable size of about 1000 * K.
*
* Elements may repeat within an array. Since duplicates do not change the
@@ -242,8 +242,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* Invoke analyze.c's standard analysis function to create scalar-style
- * stats for the column. It will expect its own extra_data pointer,
- * so temporarily install that.
+ * stats for the column. It will expect its own extra_data pointer, so
+ * temporarily install that.
*/
stats->extra_data = extra_data->std_extra_data;
(*extra_data->std_compute_stats) (stats, fetchfunc, samplerows, totalrows);
@@ -373,8 +373,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/* The element value is already on the tracking list */
/*
- * The operators we assist ignore duplicate array elements,
- * so count a given distinct element only once per array.
+ * The operators we assist ignore duplicate array elements, so
+ * count a given distinct element only once per array.
*/
if (item->last_container == array_no)
continue;
@@ -387,11 +387,11 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/* Initialize new tracking list element */
/*
- * If element type is pass-by-reference, we must copy it
- * into palloc'd space, so that we can release the array
- * below. (We do this so that the space needed for element
- * values is limited by the size of the hashtable; if we
- * kept all the array values around, it could be much more.)
+ * If element type is pass-by-reference, we must copy it into
+ * palloc'd space, so that we can release the array below.
+ * (We do this so that the space needed for element values is
+ * limited by the size of the hashtable; if we kept all the
+ * array values around, it could be much more.)
*/
item->key = datumCopy(elem_value,
extra_data->typbyval,
@@ -623,7 +623,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* (compare the histogram-making loop in compute_scalar_stats()).
* But instead of that we have the sorted_count_items[] array,
* which holds unique DEC values with their frequencies (that is,
- * a run-length-compressed version of the full array). So we
+ * a run-length-compressed version of the full array). So we
* control advancing through sorted_count_items[] with the
* variable "frac", which is defined as (x - y) * (num_hist - 1),
* where x is the index in the notional DECs array corresponding
@@ -655,7 +655,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
frac += (int64) sorted_count_items[j]->frequency * (num_hist - 1);
}
hist[i] = sorted_count_items[j]->count;
- frac -= delta; /* update y for upcoming i increment */
+ frac -= delta; /* update y for upcoming i increment */
}
Assert(j == count_items_count - 1);
@@ -775,8 +775,8 @@ trackitem_compare_element(const void *e1, const void *e2)
static int
countitem_compare_count(const void *e1, const void *e2)
{
- const DECountItem * const *t1 = (const DECountItem * const *) e1;
- const DECountItem * const *t2 = (const DECountItem * const *) e2;
+ const DECountItem *const * t1 = (const DECountItem *const *) e1;
+ const DECountItem *const * t2 = (const DECountItem *const *) e2;
if ((*t1)->count < (*t2)->count)
return -1;
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 4a2d413ba20..82551c5f30e 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -133,7 +133,7 @@ cash_in(PG_FUNCTION_ARGS)
dsymbol = '.';
if (*lconvert->mon_thousands_sep != '\0')
ssymbol = lconvert->mon_thousands_sep;
- else /* ssymbol should not equal dsymbol */
+ else /* ssymbol should not equal dsymbol */
ssymbol = (dsymbol != ',') ? "," : ".";
csymbol = (*lconvert->currency_symbol != '\0') ? lconvert->currency_symbol : "$";
psymbol = (*lconvert->positive_sign != '\0') ? lconvert->positive_sign : "+";
@@ -301,7 +301,7 @@ cash_out(PG_FUNCTION_ARGS)
dsymbol = '.';
if (*lconvert->mon_thousands_sep != '\0')
ssymbol = lconvert->mon_thousands_sep;
- else /* ssymbol should not equal dsymbol */
+ else /* ssymbol should not equal dsymbol */
ssymbol = (dsymbol != ',') ? "," : ".";
csymbol = (*lconvert->currency_symbol != '\0') ? lconvert->currency_symbol : "$";
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 0fc187e0d6f..6e29ebb7844 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -337,7 +337,7 @@ date_fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
date_sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = date_fastcmp;
PG_RETURN_VOID();
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index d5d34da5521..1c2c39b2e27 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -4170,7 +4170,7 @@ TemporalTransform(int32 max_precis, Node *node)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) &&!((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 old_precis = exprTypmod(source);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index fd19de72cb6..2ccdc0cee6e 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -555,6 +555,7 @@ static char *
numeric_to_cstring(Numeric n)
{
Datum d = NumericGetDatum(n);
+
return DatumGetCString(DirectFunctionCall1(numeric_out, d));
}
@@ -562,6 +563,7 @@ static Numeric
int64_to_numeric(int64 v)
{
Datum d = Int64GetDatum(v);
+
return DatumGetNumeric(DirectFunctionCall1(int8_numeric, d));
}
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index ca0042a1762..b7ce9357f4a 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -217,7 +217,7 @@ float4in(PG_FUNCTION_ARGS)
/* did we not see anything that looks like a double? */
if (endptr == num || errno != 0)
{
- int save_errno = errno;
+ int save_errno = errno;
/*
* C99 requires that strtod() accept NaN and [-]Infinity, but not all
@@ -244,9 +244,9 @@ float4in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try
- * to detect whether it's a "real" out-of-range condition by
- * checking to see if the result is zero or huge.
+ * precision). We'd prefer not to throw error for that, so try to
+ * detect whether it's a "real" out-of-range condition by checking
+ * to see if the result is zero or huge.
*/
if (val == 0.0 || val >= HUGE_VAL || val <= -HUGE_VAL)
ereport(ERROR,
@@ -422,7 +422,7 @@ float8in(PG_FUNCTION_ARGS)
/* did we not see anything that looks like a double? */
if (endptr == num || errno != 0)
{
- int save_errno = errno;
+ int save_errno = errno;
/*
* C99 requires that strtod() accept NaN and [-]Infinity, but not all
@@ -449,15 +449,15 @@ float8in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try
- * to detect whether it's a "real" out-of-range condition by
- * checking to see if the result is zero or huge.
+ * precision). We'd prefer not to throw error for that, so try to
+ * detect whether it's a "real" out-of-range condition by checking
+ * to see if the result is zero or huge.
*/
if (val == 0.0 || val >= HUGE_VAL || val <= -HUGE_VAL)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"%s\" is out of range for type double precision",
- orig_num)));
+ errmsg("\"%s\" is out of range for type double precision",
+ orig_num)));
}
else
ereport(ERROR,
@@ -973,7 +973,7 @@ btfloat4fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btfloat4sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btfloat4fastcmp;
PG_RETURN_VOID();
@@ -1087,7 +1087,7 @@ btfloat8fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btfloat8sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btfloat8fastcmp;
PG_RETURN_VOID();
@@ -2750,7 +2750,7 @@ width_bucket_float8(PG_FUNCTION_ARGS)
if (isnan(operand) || isnan(bound1) || isnan(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("operand, lower bound, and upper bound cannot be NaN")));
+ errmsg("operand, lower bound, and upper bound cannot be NaN")));
/* Note that we allow "operand" to be infinite */
if (isinf(bound1) || isinf(bound2))
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index d848739d4aa..765c6aa8d5e 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1987,8 +1987,8 @@ static int
adjust_partial_year_to_2020(int year)
{
/*
- * Adjust all dates toward 2020; this is effectively what happens
- * when we assume '70' is 1970 and '69' is 2069.
+ * Adjust all dates toward 2020; this is effectively what happens when we
+ * assume '70' is 1970 and '69' is 2069.
*/
/* Force 0-69 into the 2000's */
if (year < 70)
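A minimal sketch of the two-digit mapping this comment describes, as a hypothetical standalone helper (the 0-69 / 70-99 split is taken from the comment above; this is not the function being diffed):

    /* Two-digit years centered on 2020: 00-69 -> 2000-2069, 70-99 -> 1970-1999 */
    static int
    two_digit_year_to_full(int year)
    {
        if (year < 70)
            return year + 2000;     /* '69' is taken as 2069 */
        else
            return year + 1900;     /* '70' is taken as 1970 */
    }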
@@ -4485,7 +4485,7 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
*/
if (Np->last_relevant && Np->Num->zero_end > Np->num_pre)
{
- char *last_zero;
+ char *last_zero;
last_zero = Np->number + (Np->Num->zero_end - Np->num_pre);
if (Np->last_relevant < last_zero)
diff --git a/src/backend/utils/adt/inet_net_pton.c b/src/backend/utils/adt/inet_net_pton.c
index 66cdacecb54..9064eaf64b0 100644
--- a/src/backend/utils/adt/inet_net_pton.c
+++ b/src/backend/utils/adt/inet_net_pton.c
@@ -30,7 +30,8 @@ static const char rcsid[] = "Id: inet_net_pton.c,v 1.4.2.3 2004/03/17 00:40:11 m
#include <assert.h>
#include <ctype.h>
-#include "utils/builtins.h" /* pgrminclude ignore */ /* needed on some platforms */
+#include "utils/builtins.h" /* pgrminclude ignore */ /* needed on some
+ * platforms */
#include "utils/inet.h"
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 61ae62eb8a9..e494630d60b 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -42,7 +42,7 @@ typedef struct
char *input;
char *token_start;
char *token_terminator;
- JsonValueType token_type;
+ JsonValueType token_type;
int line_number;
char *line_start;
} JsonLexContext;
@@ -60,7 +60,7 @@ typedef enum
typedef struct JsonParseStack
{
- JsonParseState state;
+ JsonParseState state;
} JsonParseStack;
typedef enum
@@ -80,9 +80,9 @@ static void report_invalid_token(JsonLexContext *lex);
static char *extract_mb_char(char *s);
static void composite_to_json(Datum composite, StringInfo result, bool use_line_feeds);
static void array_dim_to_json(StringInfo result, int dim, int ndims, int *dims,
- Datum *vals, bool *nulls, int *valcount,
- TYPCATEGORY tcategory, Oid typoutputfunc,
- bool use_line_feeds);
+ Datum *vals, bool *nulls, int *valcount,
+ TYPCATEGORY tcategory, Oid typoutputfunc,
+ bool use_line_feeds);
static void array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds);
/* fake type category for JSON so we can distinguish it in datum_to_json */
@@ -95,7 +95,7 @@ static void array_to_json_internal(Datum array, StringInfo result, bool use_line
Datum
json_in(PG_FUNCTION_ARGS)
{
- char *text = PG_GETARG_CSTRING(0);
+ char *text = PG_GETARG_CSTRING(0);
json_validate_cstring(text);
@@ -108,7 +108,7 @@ json_in(PG_FUNCTION_ARGS)
Datum
json_out(PG_FUNCTION_ARGS)
{
- Datum txt = PG_GETARG_DATUM(0);
+ Datum txt = PG_GETARG_DATUM(0);
PG_RETURN_CSTRING(TextDatumGetCString(txt));
}
@@ -120,7 +120,7 @@ Datum
json_send(PG_FUNCTION_ARGS)
{
StringInfoData buf;
- text *t = PG_GETARG_TEXT_PP(0);
+ text *t = PG_GETARG_TEXT_PP(0);
pq_begintypsend(&buf);
pq_sendtext(&buf, VARDATA_ANY(t), VARSIZE_ANY_EXHDR(t));
@@ -163,10 +163,10 @@ json_recv(PG_FUNCTION_ARGS)
static void
json_validate_cstring(char *input)
{
- JsonLexContext lex;
+ JsonLexContext lex;
JsonParseStack *stack,
- *stacktop;
- int stacksize;
+ *stacktop;
+ int stacksize;
/* Set up lexing context. */
lex.input = input;
@@ -183,7 +183,7 @@ json_validate_cstring(char *input)
/* Main parsing loop. */
for (;;)
{
- JsonStackOp op;
+ JsonStackOp op;
/* Fetch next token. */
json_lex(&lex);
@@ -213,7 +213,7 @@ redo:
else if (lex.token_start[0] == ']')
op = JSON_STACKOP_POP;
else if (lex.token_start[0] == '['
- || lex.token_start[0] == '{')
+ || lex.token_start[0] == '{')
{
stack->state = JSON_PARSE_ARRAY_NEXT;
op = JSON_STACKOP_PUSH_WITH_PUSHBACK;
@@ -235,7 +235,7 @@ redo:
if (lex.token_type == JSON_VALUE_STRING)
stack->state = JSON_PARSE_OBJECT_LABEL;
else if (lex.token_type == JSON_VALUE_INVALID
- && lex.token_start[0] == '}')
+ && lex.token_start[0] == '}')
op = JSON_STACKOP_POP;
else
report_parse_error(stack, &lex);
@@ -268,7 +268,7 @@ redo:
break;
default:
elog(ERROR, "unexpected json parse state: %d",
- (int) stack->state);
+ (int) stack->state);
}
/* Push or pop the stack, if needed. */
@@ -279,7 +279,8 @@ redo:
++stack;
if (stack >= &stacktop[stacksize])
{
- int stackoffset = stack - stacktop;
+ int stackoffset = stack - stacktop;
+
stacksize = stacksize + 32;
stacktop = repalloc(stacktop,
sizeof(JsonParseStack) * stacksize);
@@ -362,19 +363,19 @@ json_lex(JsonLexContext *lex)
}
else
{
- char *p;
+ char *p;
/*
- * We're not dealing with a string, number, legal punctuation mark,
- * or end of string. The only legal tokens we might find here are
- * true, false, and null, but for error reporting purposes we scan
- * until we see a non-alphanumeric character. That way, we can report
- * the whole word as an unexpected token, rather than just some
+ * We're not dealing with a string, number, legal punctuation mark, or
+ * end of string. The only legal tokens we might find here are true,
+ * false, and null, but for error reporting purposes we scan until we
+ * see a non-alphanumeric character. That way, we can report the
+ * whole word as an unexpected token, rather than just some
* unintuitive prefix thereof.
*/
- for (p = s; (*p >= 'a' && *p <= 'z') || (*p >= 'A' && *p <= 'Z')
- || (*p >= '0' && *p <= '9') || *p == '_' || IS_HIGHBIT_SET(*p);
- ++p)
+ for (p = s; (*p >= 'a' && *p <= 'z') || (*p >= 'A' && *p <= 'Z')
+ || (*p >= '0' && *p <= '9') || *p == '_' || IS_HIGHBIT_SET(*p);
+ ++p)
;
/*
@@ -431,7 +432,7 @@ json_lex_string(JsonLexContext *lex)
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json"),
errdetail_internal("line %d: Character with value \"0x%02x\" must be escaped.",
- lex->line_number, (unsigned char) *s)));
+ lex->line_number, (unsigned char) *s)));
}
else if (*s == '\\')
{
@@ -444,8 +445,8 @@ json_lex_string(JsonLexContext *lex)
}
else if (*s == 'u')
{
- int i;
- int ch = 0;
+ int i;
+ int ch = 0;
for (i = 1; i <= 4; ++i)
{
@@ -466,7 +467,7 @@ json_lex_string(JsonLexContext *lex)
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json"),
errdetail_internal("line %d: \"\\u\" must be followed by four hexadecimal digits.",
- lex->line_number)));
+ lex->line_number)));
}
}
@@ -479,8 +480,8 @@ json_lex_string(JsonLexContext *lex)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json"),
- errdetail_internal("line %d: Invalid escape \"\\%s\".",
- lex->line_number, extract_mb_char(s))));
+ errdetail_internal("line %d: Invalid escape \"\\%s\".",
+ lex->line_number, extract_mb_char(s))));
}
}
}
@@ -497,17 +498,17 @@ json_lex_string(JsonLexContext *lex)
* (1) An optional minus sign ('-').
*
* (2) Either a single '0', or a string of one or more digits that does not
- * begin with a '0'.
+ * begin with a '0'.
*
* (3) An optional decimal part, consisting of a period ('.') followed by
- * one or more digits. (Note: While this part can be omitted
- * completely, it's not OK to have only the decimal point without
- * any digits afterwards.)
+ * one or more digits. (Note: While this part can be omitted
+ * completely, it's not OK to have only the decimal point without
+ * any digits afterwards.)
*
* (4) An optional exponent part, consisting of 'e' or 'E', optionally
- * followed by '+' or '-', followed by one or more digits. (Note:
- * As with the decimal part, if 'e' or 'E' is present, it must be
- * followed by at least one digit.)
+ * followed by '+' or '-', followed by one or more digits. (Note:
+ * As with the decimal part, if 'e' or 'E' is present, it must be
+ * followed by at least one digit.)
*
* The 's' argument to this function points to the ostensible beginning
* of part 2 - i.e. the character after any optional minus sign, and the
@@ -518,8 +519,8 @@ json_lex_string(JsonLexContext *lex)
static void
json_lex_number(JsonLexContext *lex, char *s)
{
- bool error = false;
- char *p;
+ bool error = false;
+ char *p;
/* Part (1): leading sign indicator. */
/* Caller already did this for us; so do nothing. */
@@ -571,7 +572,7 @@ json_lex_number(JsonLexContext *lex, char *s)
/* Check for trailing garbage. */
for (p = s; (*p >= 'a' && *p <= 'z') || (*p >= 'A' && *p <= 'Z')
- || (*p >= '0' && *p <= '9') || *p == '_' || IS_HIGHBIT_SET(*p); ++p)
+ || (*p >= '0' && *p <= '9') || *p == '_' || IS_HIGHBIT_SET(*p); ++p)
;
lex->token_terminator = p;
if (p > s || error)
@@ -584,17 +585,17 @@ json_lex_number(JsonLexContext *lex, char *s)
static void
report_parse_error(JsonParseStack *stack, JsonLexContext *lex)
{
- char *detail = NULL;
- char *token = NULL;
- int toklen;
+ char *detail = NULL;
+ char *token = NULL;
+ int toklen;
/* Handle case where the input ended prematurely. */
if (lex->token_start == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json: \"%s\"",
- lex->input),
- errdetail_internal("The input string ended unexpectedly.")));
+ lex->input),
+ errdetail_internal("The input string ended unexpectedly.")));
/* Work out the offending token. */
toklen = lex->token_terminator - lex->token_start;
@@ -636,8 +637,8 @@ report_parse_error(JsonParseStack *stack, JsonLexContext *lex)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json: \"%s\"",
- lex->input),
- detail ? errdetail_internal(detail, lex->line_number, token) : 0));
+ lex->input),
+ detail ? errdetail_internal(detail, lex->line_number, token) : 0));
}
/*
@@ -646,8 +647,8 @@ report_parse_error(JsonParseStack *stack, JsonLexContext *lex)
static void
report_invalid_token(JsonLexContext *lex)
{
- char *token;
- int toklen;
+ char *token;
+ int toklen;
toklen = lex->token_terminator - lex->token_start;
token = palloc(toklen + 1);
@@ -658,7 +659,7 @@ report_invalid_token(JsonLexContext *lex)
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json"),
errdetail_internal("line %d: Token \"%s\" is invalid.",
- lex->line_number, token)));
+ lex->line_number, token)));
}
/*
@@ -667,8 +668,8 @@ report_invalid_token(JsonLexContext *lex)
static char *
extract_mb_char(char *s)
{
- char *res;
- int len;
+ char *res;
+ int len;
len = pg_mblen(s);
res = palloc(len + 1);
@@ -687,11 +688,11 @@ datum_to_json(Datum val, bool is_null, StringInfo result, TYPCATEGORY tcategory,
Oid typoutputfunc)
{
- char *outputstr;
+ char *outputstr;
if (is_null)
{
- appendStringInfoString(result,"null");
+ appendStringInfoString(result, "null");
return;
}
@@ -705,19 +706,20 @@ datum_to_json(Datum val, bool is_null, StringInfo result, TYPCATEGORY tcategory,
break;
case TYPCATEGORY_BOOLEAN:
if (DatumGetBool(val))
- appendStringInfoString(result,"true");
+ appendStringInfoString(result, "true");
else
- appendStringInfoString(result,"false");
+ appendStringInfoString(result, "false");
break;
case TYPCATEGORY_NUMERIC:
outputstr = OidOutputFunctionCall(typoutputfunc, val);
+
/*
- * Don't call escape_json here if it's a valid JSON
- * number. Numeric output should usually be a valid
- * JSON number and JSON numbers shouldn't be quoted.
- * Quote cases like "Nan" and "Infinity", however.
+ * Don't call escape_json here if it's a valid JSON number.
+ * Numeric output should usually be a valid JSON number and JSON
+ * numbers shouldn't be quoted. Quote cases like "Nan" and
+ * "Infinity", however.
*/
- if (strpbrk(outputstr,NON_NUMERIC_LETTER) == NULL)
+ if (strpbrk(outputstr, NON_NUMERIC_LETTER) == NULL)
appendStringInfoString(result, outputstr);
else
escape_json(result, outputstr);
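A worked illustration of the rule in this hunk (editorial sketch, not output reproduced from the commit): numeric output containing none of the NON_NUMERIC_LETTER characters is appended bare, while anything with letters goes through escape_json and ends up quoted:

    12.5       ->  12.5           (valid JSON number, appended as-is)
    NaN        ->  "NaN"          (contains letters, so escaped and quoted)
    Infinity   ->  "Infinity"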
@@ -742,13 +744,13 @@ datum_to_json(Datum val, bool is_null, StringInfo result, TYPCATEGORY tcategory,
* ourselves recursively to process the next dimension.
*/
static void
-array_dim_to_json(StringInfo result, int dim, int ndims,int * dims, Datum *vals,
- bool *nulls, int * valcount, TYPCATEGORY tcategory,
+array_dim_to_json(StringInfo result, int dim, int ndims, int *dims, Datum *vals,
+ bool *nulls, int *valcount, TYPCATEGORY tcategory,
Oid typoutputfunc, bool use_line_feeds)
{
- int i;
- char *sep;
+ int i;
+ char *sep;
Assert(dim < ndims);
@@ -759,7 +761,7 @@ array_dim_to_json(StringInfo result, int dim, int ndims,int * dims, Datum *vals,
for (i = 1; i <= dims[dim]; i++)
{
if (i > 1)
- appendStringInfoString(result,sep);
+ appendStringInfoString(result, sep);
if (dim + 1 == ndims)
{
@@ -770,10 +772,10 @@ array_dim_to_json(StringInfo result, int dim, int ndims,int * dims, Datum *vals,
else
{
/*
- * Do we want line feeds on inner dimensions of arrays?
- * For now we'll say no.
+ * Do we want line feeds on inner dimensions of arrays? For now
+ * we'll say no.
*/
- array_dim_to_json(result, dim+1, ndims, dims, vals, nulls,
+ array_dim_to_json(result, dim + 1, ndims, dims, vals, nulls,
valcount, tcategory, typoutputfunc, false);
}
}
@@ -792,9 +794,9 @@ array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds)
int *dim;
int ndim;
int nitems;
- int count = 0;
+ int count = 0;
Datum *elements;
- bool *nulls;
+ bool *nulls;
int16 typlen;
bool typbyval;
@@ -810,7 +812,7 @@ array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds)
if (nitems <= 0)
{
- appendStringInfoString(result,"[]");
+ appendStringInfoString(result, "[]");
return;
}
@@ -842,52 +844,54 @@ array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds)
static void
composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
{
- HeapTupleHeader td;
- Oid tupType;
- int32 tupTypmod;
- TupleDesc tupdesc;
- HeapTupleData tmptup, *tuple;
- int i;
- bool needsep = false;
- char *sep;
+ HeapTupleHeader td;
+ Oid tupType;
+ int32 tupTypmod;
+ TupleDesc tupdesc;
+ HeapTupleData tmptup,
+ *tuple;
+ int i;
+ bool needsep = false;
+ char *sep;
sep = use_line_feeds ? ",\n " : ",";
- td = DatumGetHeapTupleHeader(composite);
+ td = DatumGetHeapTupleHeader(composite);
- /* Extract rowtype info and find a tupdesc */
- tupType = HeapTupleHeaderGetTypeId(td);
- tupTypmod = HeapTupleHeaderGetTypMod(td);
- tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
+ /* Extract rowtype info and find a tupdesc */
+ tupType = HeapTupleHeaderGetTypeId(td);
+ tupTypmod = HeapTupleHeaderGetTypMod(td);
+ tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
- /* Build a temporary HeapTuple control structure */
- tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
- tmptup.t_data = td;
+ /* Build a temporary HeapTuple control structure */
+ tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
+ tmptup.t_data = td;
tuple = &tmptup;
- appendStringInfoChar(result,'{');
+ appendStringInfoChar(result, '{');
- for (i = 0; i < tupdesc->natts; i++)
- {
- Datum val, origval;
- bool isnull;
- char *attname;
+ for (i = 0; i < tupdesc->natts; i++)
+ {
+ Datum val,
+ origval;
+ bool isnull;
+ char *attname;
TYPCATEGORY tcategory;
Oid typoutput;
bool typisvarlena;
if (tupdesc->attrs[i]->attisdropped)
- continue;
+ continue;
if (needsep)
- appendStringInfoString(result,sep);
+ appendStringInfoString(result, sep);
needsep = true;
- attname = NameStr(tupdesc->attrs[i]->attname);
- escape_json(result,attname);
- appendStringInfoChar(result,':');
+ attname = NameStr(tupdesc->attrs[i]->attname);
+ escape_json(result, attname);
+ appendStringInfoChar(result, ':');
- origval = heap_getattr(tuple, i + 1, tupdesc, &isnull);
+ origval = heap_getattr(tuple, i + 1, tupdesc, &isnull);
if (tupdesc->attrs[i]->atttypid == RECORDARRAYOID)
tcategory = TYPCATEGORY_ARRAY;
@@ -902,10 +906,10 @@ composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
&typoutput, &typisvarlena);
/*
- * If we have a toasted datum, forcibly detoast it here to avoid memory
- * leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
- if (typisvarlena && ! isnull)
+ if (typisvarlena && !isnull)
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
else
val = origval;
@@ -917,8 +921,8 @@ composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
pfree(DatumGetPointer(val));
}
- appendStringInfoChar(result,'}');
- ReleaseTupleDesc(tupdesc);
+ appendStringInfoChar(result, '}');
+ ReleaseTupleDesc(tupdesc);
}
/*
@@ -927,7 +931,7 @@ composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
extern Datum
array_to_json(PG_FUNCTION_ARGS)
{
- Datum array = PG_GETARG_DATUM(0);
+ Datum array = PG_GETARG_DATUM(0);
StringInfo result;
result = makeStringInfo();
@@ -943,8 +947,8 @@ array_to_json(PG_FUNCTION_ARGS)
extern Datum
array_to_json_pretty(PG_FUNCTION_ARGS)
{
- Datum array = PG_GETARG_DATUM(0);
- bool use_line_feeds = PG_GETARG_BOOL(1);
+ Datum array = PG_GETARG_DATUM(0);
+ bool use_line_feeds = PG_GETARG_BOOL(1);
StringInfo result;
result = makeStringInfo();
@@ -960,7 +964,7 @@ array_to_json_pretty(PG_FUNCTION_ARGS)
extern Datum
row_to_json(PG_FUNCTION_ARGS)
{
- Datum array = PG_GETARG_DATUM(0);
+ Datum array = PG_GETARG_DATUM(0);
StringInfo result;
result = makeStringInfo();
@@ -976,8 +980,8 @@ row_to_json(PG_FUNCTION_ARGS)
extern Datum
row_to_json_pretty(PG_FUNCTION_ARGS)
{
- Datum array = PG_GETARG_DATUM(0);
- bool use_line_feeds = PG_GETARG_BOOL(1);
+ Datum array = PG_GETARG_DATUM(0);
+ bool use_line_feeds = PG_GETARG_BOOL(1);
StringInfo result;
result = makeStringInfo();
@@ -1031,4 +1035,3 @@ escape_json(StringInfo buf, const char *str)
}
appendStringInfoCharMacro(buf, '\"');
}
-
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index ca1b1db18a6..33c5b64f50a 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -160,7 +160,7 @@ pg_lock_status(PG_FUNCTION_ARGS)
bool nulls[NUM_LOCK_STATUS_COLUMNS];
HeapTuple tuple;
Datum result;
- LockInstanceData *instance;
+ LockInstanceData *instance;
instance = &(lockData->locks[mystatus->currIdx]);
@@ -375,8 +375,8 @@ pg_lock_status(PG_FUNCTION_ARGS)
nulls[11] = true;
/*
- * Lock mode. Currently all predicate locks are SIReadLocks, which
- * are always held (never waiting) and have no fast path
+ * Lock mode. Currently all predicate locks are SIReadLocks, which are
+ * always held (never waiting) and have no fast path
*/
values[12] = CStringGetTextDatum("SIReadLock");
values[13] = BoolGetDatum(true);
diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c
index 958ff54d73e..aa9993fa5c6 100644
--- a/src/backend/utils/adt/mac.c
+++ b/src/backend/utils/adt/mac.c
@@ -247,8 +247,8 @@ hashmacaddr(PG_FUNCTION_ARGS)
Datum
macaddr_not(PG_FUNCTION_ARGS)
{
- macaddr *addr = PG_GETARG_MACADDR_P(0);
- macaddr *result;
+ macaddr *addr = PG_GETARG_MACADDR_P(0);
+ macaddr *result;
result = (macaddr *) palloc(sizeof(macaddr));
result->a = ~addr->a;
@@ -263,9 +263,9 @@ macaddr_not(PG_FUNCTION_ARGS)
Datum
macaddr_and(PG_FUNCTION_ARGS)
{
- macaddr *addr1 = PG_GETARG_MACADDR_P(0);
- macaddr *addr2 = PG_GETARG_MACADDR_P(1);
- macaddr *result;
+ macaddr *addr1 = PG_GETARG_MACADDR_P(0);
+ macaddr *addr2 = PG_GETARG_MACADDR_P(1);
+ macaddr *result;
result = (macaddr *) palloc(sizeof(macaddr));
result->a = addr1->a & addr2->a;
@@ -280,9 +280,9 @@ macaddr_and(PG_FUNCTION_ARGS)
Datum
macaddr_or(PG_FUNCTION_ARGS)
{
- macaddr *addr1 = PG_GETARG_MACADDR_P(0);
- macaddr *addr2 = PG_GETARG_MACADDR_P(1);
- macaddr *result;
+ macaddr *addr1 = PG_GETARG_MACADDR_P(0);
+ macaddr *addr2 = PG_GETARG_MACADDR_P(1);
+ macaddr *result;
result = (macaddr *) palloc(sizeof(macaddr));
result->a = addr1->a | addr2->a;
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 6bd7d531bbc..96e692766bf 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -329,14 +329,14 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
Datum
pg_tablespace_location(PG_FUNCTION_ARGS)
{
- Oid tablespaceOid = PG_GETARG_OID(0);
- char sourcepath[MAXPGPATH];
- char targetpath[MAXPGPATH];
- int rllen;
+ Oid tablespaceOid = PG_GETARG_OID(0);
+ char sourcepath[MAXPGPATH];
+ char targetpath[MAXPGPATH];
+ int rllen;
/*
* It's useful to apply this function to pg_class.reltablespace, wherein
- * zero means "the database's default tablespace". So, rather than
+ * zero means "the database's default tablespace". So, rather than
* throwing an error for zero, we choose to assume that's what is meant.
*/
if (tablespaceOid == InvalidOid)
@@ -350,9 +350,10 @@ pg_tablespace_location(PG_FUNCTION_ARGS)
PG_RETURN_TEXT_P(cstring_to_text(""));
#if defined(HAVE_READLINK) || defined(WIN32)
+
/*
- * Find the location of the tablespace by reading the symbolic link that is
- * in pg_tblspc/<oid>.
+ * Find the location of the tablespace by reading the symbolic link that
+ * is in pg_tblspc/<oid>.
*/
snprintf(sourcepath, sizeof(sourcepath), "pg_tblspc/%u", tablespaceOid);
@@ -510,8 +511,8 @@ pg_typeof(PG_FUNCTION_ARGS)
Datum
pg_collation_for(PG_FUNCTION_ARGS)
{
- Oid typeid;
- Oid collid;
+ Oid typeid;
+ Oid collid;
typeid = get_fn_expr_argtype(fcinfo->flinfo, 0);
if (!typeid)
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 14bbdad93ba..68c1f1de3b7 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -718,7 +718,7 @@ numeric_send(PG_FUNCTION_ARGS)
*
* Flatten calls to numeric's length coercion function that solely represent
* increases in allowable precision. Scale changes mutate every datum, so
- * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
+ * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
* unconstrained numeric, so a change from an unconstrained numeric to any
* constrained numeric is also unoptimizable.
*/
@@ -734,7 +734,7 @@ numeric_transform(PG_FUNCTION_ARGS)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) &&!((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 old_typmod = exprTypmod(source);
@@ -748,7 +748,7 @@ numeric_transform(PG_FUNCTION_ARGS)
* If new_typmod < VARHDRSZ, the destination is unconstrained; that's
* always OK. If old_typmod >= VARHDRSZ, the source is constrained,
* and we're OK if the scale is unchanged and the precision is not
- * decreasing. See further notes in function header comment.
+ * decreasing. See further notes in function header comment.
*/
if (new_typmod < (int32) VARHDRSZ ||
(old_typmod >= (int32) VARHDRSZ &&
@@ -1222,7 +1222,7 @@ width_bucket_numeric(PG_FUNCTION_ARGS)
NUMERIC_IS_NAN(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("operand, lower bound, and upper bound cannot be NaN")));
+ errmsg("operand, lower bound, and upper bound cannot be NaN")));
init_var(&result_var);
init_var(&count_var);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 0920c13cd9f..de881bf6344 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -224,7 +224,7 @@ pg_perm_setlocale(int category, const char *locale)
* Is the locale name valid for the locale category?
*
* If successful, and canonname isn't NULL, a palloc'd copy of the locale's
- * canonical name is stored there. This is especially useful for figuring out
+ * canonical name is stored there. This is especially useful for figuring out
* what locale name "" means (ie, the server environment value). (Actually,
* it seems that on most implementations that's the only thing it's good for;
* we could wish that setlocale gave back a canonically spelled version of
@@ -578,7 +578,7 @@ strftime_win32(char *dst, size_t dstlen, const wchar_t *format, const struct tm
len = WideCharToMultiByte(CP_UTF8, 0, wbuf, len, dst, dstlen, NULL, NULL);
if (len == 0)
elog(ERROR,
- "could not convert string to UTF-8: error code %lu", GetLastError());
+ "could not convert string to UTF-8: error code %lu", GetLastError());
dst[len] = '\0';
if (encoding != PG_UTF8)
@@ -970,7 +970,7 @@ report_newlocale_failure(const char *localename)
errdetail("The operating system could not find any locale data for the locale name \"%s\".",
localename) : 0)));
}
-#endif /* HAVE_LOCALE_T */
+#endif /* HAVE_LOCALE_T */
/*
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 83d0c229917..7c0705abcc9 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -1225,8 +1225,8 @@ pg_stat_get_db_stat_reset_time(PG_FUNCTION_ARGS)
Datum
pg_stat_get_db_temp_files(PG_FUNCTION_ARGS)
{
- Oid dbid = PG_GETARG_OID(0);
- int64 result;
+ Oid dbid = PG_GETARG_OID(0);
+ int64 result;
PgStat_StatDBEntry *dbentry;
if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)
@@ -1241,8 +1241,8 @@ pg_stat_get_db_temp_files(PG_FUNCTION_ARGS)
Datum
pg_stat_get_db_temp_bytes(PG_FUNCTION_ARGS)
{
- Oid dbid = PG_GETARG_OID(0);
- int64 result;
+ Oid dbid = PG_GETARG_OID(0);
+ int64 result;
PgStat_StatDBEntry *dbentry;
if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 0994fa77cf6..22ceb3c01d4 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -54,19 +54,19 @@ typedef struct RangeIOData
static RangeIOData *get_range_io_data(FunctionCallInfo fcinfo, Oid rngtypid,
- IOFuncSelector func);
+ IOFuncSelector func);
static char range_parse_flags(const char *flags_str);
static void range_parse(const char *input_str, char *flags, char **lbound_str,
char **ubound_str);
static const char *range_parse_bound(const char *string, const char *ptr,
char **bound_str, bool *infinite);
static char *range_deparse(char flags, const char *lbound_str,
- const char *ubound_str);
+ const char *ubound_str);
static char *range_bound_escape(const char *value);
static bool range_contains_internal(TypeCacheEntry *typcache,
- RangeType *r1, RangeType *r2);
+ RangeType *r1, RangeType *r2);
static bool range_contains_elem_internal(TypeCacheEntry *typcache,
- RangeType *r, Datum val);
+ RangeType *r, Datum val);
static Size datum_compute_size(Size sz, Datum datum, bool typbyval,
char typalign, int16 typlen, char typstorage);
static Pointer datum_write(Pointer ptr, Datum datum, bool typbyval,
@@ -299,10 +299,10 @@ get_range_io_data(FunctionCallInfo fcinfo, Oid rngtypid, IOFuncSelector func)
if (cache == NULL || cache->typcache->type_id != rngtypid)
{
- int16 typlen;
- bool typbyval;
- char typalign;
- char typdelim;
+ int16 typlen;
+ bool typbyval;
+ char typalign;
+ char typdelim;
cache = (RangeIOData *) MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
sizeof(RangeIOData));
@@ -326,13 +326,13 @@ get_range_io_data(FunctionCallInfo fcinfo, Oid rngtypid, IOFuncSelector func)
if (func == IOFunc_receive)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary input function available for type %s",
- format_type_be(cache->typcache->rngelemtype->type_id))));
+ errmsg("no binary input function available for type %s",
+ format_type_be(cache->typcache->rngelemtype->type_id))));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary output function available for type %s",
- format_type_be(cache->typcache->rngelemtype->type_id))));
+ errmsg("no binary output function available for type %s",
+ format_type_be(cache->typcache->rngelemtype->type_id))));
}
fmgr_info_cxt(cache->typiofunc, &cache->proc,
fcinfo->flinfo->fn_mcxt);
@@ -397,7 +397,7 @@ range_constructor3(PG_FUNCTION_ARGS)
if (PG_ARGISNULL(2))
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("range constructor flags argument must not be NULL")));
+ errmsg("range constructor flags argument must not be NULL")));
flags = range_parse_flags(text_to_cstring(PG_GETARG_TEXT_P(2)));
@@ -716,9 +716,9 @@ range_adjacent(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(false);
/*
- * Given two ranges A..B and C..D, where B < C, the ranges are adjacent
- * if and only if the range B..C is empty, where inclusivity of these two
- * bounds is inverted compared to the original bounds. For discrete
+ * Given two ranges A..B and C..D, where B < C, the ranges are adjacent if
+ * and only if the range B..C is empty, where inclusivity of these two
+ * bounds is inverted compared to the original bounds. For discrete
* ranges, we have to rely on the canonicalization function to normalize
* B..C to empty if it contains no elements of the subtype. (If there is
* no canonicalization function, it's impossible for such a range to
@@ -920,7 +920,7 @@ range_minus(PG_FUNCTION_ARGS)
if (cmp_l1l2 < 0 && cmp_u1u2 > 0)
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("result of range difference would not be contiguous")));
+ errmsg("result of range difference would not be contiguous")));
if (cmp_l1u2 > 0 || cmp_u1l2 < 0)
PG_RETURN_RANGE(r1);
@@ -1180,11 +1180,11 @@ Datum
range_typanalyze(PG_FUNCTION_ARGS)
{
/*
- * For the moment, just punt and don't analyze range columns. If we
- * get close to release without having a better answer, we could
- * consider letting std_typanalyze do what it can ... but those stats
- * are probably next door to useless for most activity with range
- * columns, so it's not clear it's worth gathering them.
+ * For the moment, just punt and don't analyze range columns. If we get
+ * close to release without having a better answer, we could consider
+ * letting std_typanalyze do what it can ... but those stats are probably
+ * next door to useless for most activity with range columns, so it's not
+ * clear it's worth gathering them.
*/
PG_RETURN_BOOL(false);
}
@@ -1392,7 +1392,7 @@ tstzrange_subdiff(PG_FUNCTION_ARGS)
*
* This is for use by range-related functions that follow the convention
* of using the fn_extra field as a pointer to the type cache entry for
- * the range type. Functions that need to cache more information than
+ * the range type. Functions that need to cache more information than
* that must fend for themselves.
*/
TypeCacheEntry *
@@ -1416,7 +1416,7 @@ range_get_typcache(FunctionCallInfo fcinfo, Oid rngtypid)
* range_serialize: construct a range value from bounds and empty-flag
*
* This does not force canonicalization of the range value. In most cases,
- * external callers should only be canonicalization functions. Note that
+ * external callers should only be canonicalization functions. Note that
* we perform some datatype-independent canonicalization checks anyway.
*/
RangeType *
@@ -1753,7 +1753,7 @@ range_cmp_bounds(TypeCacheEntry *typcache, RangeBound *b1, RangeBound *b2)
* Compare two range boundary point values, returning <0, 0, or >0 according
* to whether b1 is less than, equal to, or greater than b2.
*
- * This is similar to but simpler than range_cmp_bounds(). We just compare
+ * This is similar to but simpler than range_cmp_bounds(). We just compare
* the values held in b1 and b2, ignoring inclusive/exclusive flags. The
* lower/upper flags only matter for infinities, where they tell us if the
* infinity is plus or minus.
@@ -1971,7 +1971,7 @@ range_parse(const char *string, char *flags, char **lbound_str,
}
else if (*ptr == ')')
ptr++;
- else /* must be a comma */
+ else /* must be a comma */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("malformed range literal: \"%s\"",
@@ -2224,7 +2224,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, RangeType *r, Datum val)
/*
* datum_compute_size() and datum_write() are used to insert the bound
- * values into a range object. They are modeled after heaptuple.c's
+ * values into a range object. They are modeled after heaptuple.c's
* heap_compute_data_size() and heap_fill_tuple(), but we need not handle
* null values here. TYPE_IS_PACKABLE must test the same conditions as
* heaptuple.c's ATT_IS_PACKABLE macro.
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index 87f71e6812c..16103f854bc 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -57,7 +57,7 @@
#define LIMIT_RATIO 0.3
/* Constants for fixed penalty values */
-#define INFINITE_BOUND_PENALTY 2.0
+#define INFINITE_BOUND_PENALTY 2.0
#define CONTAIN_EMPTY_PENALTY 1.0
#define DEFAULT_SUBTYPE_DIFF_PENALTY 1.0
@@ -66,8 +66,8 @@
*/
typedef struct
{
- int index;
- RangeBound bound;
+ int index;
+ RangeBound bound;
} SingleBoundSortItem;
/* place on left or right side of split? */
@@ -83,15 +83,15 @@ typedef enum
typedef struct
{
TypeCacheEntry *typcache; /* typcache for range type */
- bool has_subtype_diff; /* does it have subtype_diff? */
+ bool has_subtype_diff; /* does it have subtype_diff? */
int entries_count; /* total number of entries being split */
/* Information about currently selected split follows */
bool first; /* true if no split was selected yet */
- RangeBound *left_upper; /* upper bound of left interval */
- RangeBound *right_lower; /* lower bound of right interval */
+ RangeBound *left_upper; /* upper bound of left interval */
+ RangeBound *right_lower; /* lower bound of right interval */
float4 ratio; /* split ratio */
float4 overlap; /* overlap between left and right predicate */
@@ -146,8 +146,8 @@ typedef struct
((RangeType *) DatumGetPointer(datumCopy(PointerGetDatum(r), \
false, -1)))
-static RangeType *range_super_union(TypeCacheEntry *typcache, RangeType * r1,
- RangeType * r2);
+static RangeType *range_super_union(TypeCacheEntry *typcache, RangeType *r1,
+ RangeType *r2);
static bool range_gist_consistent_int(FmgrInfo *flinfo,
StrategyNumber strategy, RangeType *key,
Datum query);
@@ -155,19 +155,19 @@ static bool range_gist_consistent_leaf(FmgrInfo *flinfo,
StrategyNumber strategy, RangeType *key,
Datum query);
static void range_gist_fallback_split(TypeCacheEntry *typcache,
- GistEntryVector *entryvec,
- GIST_SPLITVEC *v);
+ GistEntryVector *entryvec,
+ GIST_SPLITVEC *v);
static void range_gist_class_split(TypeCacheEntry *typcache,
- GistEntryVector *entryvec,
- GIST_SPLITVEC *v,
- SplitLR *classes_groups);
+ GistEntryVector *entryvec,
+ GIST_SPLITVEC *v,
+ SplitLR *classes_groups);
static void range_gist_single_sorting_split(TypeCacheEntry *typcache,
- GistEntryVector *entryvec,
- GIST_SPLITVEC *v,
- bool use_upper_bound);
+ GistEntryVector *entryvec,
+ GIST_SPLITVEC *v,
+ bool use_upper_bound);
static void range_gist_double_sorting_split(TypeCacheEntry *typcache,
- GistEntryVector *entryvec,
- GIST_SPLITVEC *v);
+ GistEntryVector *entryvec,
+ GIST_SPLITVEC *v);
static void range_gist_consider_split(ConsiderSplitContext *context,
RangeBound *right_lower, int min_left_count,
RangeBound *left_upper, int max_left_count);
@@ -177,7 +177,7 @@ static int interval_cmp_lower(const void *a, const void *b, void *arg);
static int interval_cmp_upper(const void *a, const void *b, void *arg);
static int common_entry_cmp(const void *i1, const void *i2);
static float8 call_subtype_diff(TypeCacheEntry *typcache,
- Datum val1, Datum val2);
+ Datum val1, Datum val2);
/* GiST query consistency check */
@@ -187,6 +187,7 @@ range_gist_consistent(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
Datum query = PG_GETARG_DATUM(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
bool *recheck = (bool *) PG_GETARG_POINTER(4);
RangeType *key = DatumGetRangeType(entry->key);
@@ -280,9 +281,9 @@ range_gist_penalty(PG_FUNCTION_ARGS)
range_deserialize(typcache, new, &new_lower, &new_upper, &new_empty);
/*
- * Distinct branches for handling distinct classes of ranges. Note
- * that penalty values only need to be commensurate within the same
- * class of new range.
+ * Distinct branches for handling distinct classes of ranges. Note that
+ * penalty values only need to be commensurate within the same class of
+ * new range.
*/
if (new_empty)
{
@@ -290,9 +291,9 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (orig_empty)
{
/*
- * The best case is to insert it to empty original
- * range. Insertion here means no broadening of original range.
- * Also original range is the most narrow.
+ * The best case is to insert it to empty original range.
+ * Insertion here means no broadening of original range. Also
+ * original range is the most narrow.
*/
*penalty = 0.0;
}
@@ -309,7 +310,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else if (orig_lower.infinite && orig_upper.infinite)
{
/*
- * Original range requires broadening. (-inf; +inf) is most far
+ * Original range requires broadening. (-inf; +inf) is most far
* from normal range in this case.
*/
*penalty = 2 * CONTAIN_EMPTY_PENALTY;
@@ -360,8 +361,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (RangeIsOrContainsEmpty(orig))
{
/*
- * Original range is narrower when it doesn't contain empty ranges.
- * Add additional penalty otherwise.
+ * Original range is narrower when it doesn't contain empty
+ * ranges. Add additional penalty otherwise.
*/
*penalty += CONTAIN_EMPTY_PENALTY;
}
@@ -374,11 +375,11 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (orig_upper.infinite)
{
/*
- * (-inf, +inf) range won't be extended by insertion of
- * (-inf, x) range. It's a less desirable case than insertion
- * to (-inf, y) original range without extension, because in
- * that case original range is narrower. But we can't express
- * that in single float value.
+ * (-inf, +inf) range won't be extended by insertion of (-inf,
+ * x) range. It's a less desirable case than insertion to
+ * (-inf, y) original range without extension, because in that
+ * case original range is narrower. But we can't express that
+ * in single float value.
*/
*penalty = 0.0;
}
@@ -387,8 +388,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (range_cmp_bounds(typcache, &new_upper, &orig_upper) > 0)
{
/*
- * Get extension of original range using subtype_diff.
- * Use constant if subtype_diff unavailable.
+ * Get extension of original range using subtype_diff. Use
+ * constant if subtype_diff unavailable.
*/
if (has_subtype_diff)
*penalty = call_subtype_diff(typcache,
@@ -407,8 +408,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else
{
/*
- * If lower bound of original range is not -inf, then extension
- * of it is infinity.
+ * If lower bound of original range is not -inf, then extension of
+ * it is infinity.
*/
*penalty = get_float4_infinity();
}
@@ -421,11 +422,11 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (orig_lower.infinite)
{
/*
- * (-inf, +inf) range won't be extended by insertion of
- * (x, +inf) range. It's a less desirable case than insertion
- * to (y, +inf) original range without extension, because in
- * that case original range is narrower. But we can't express
- * that in single float value.
+ * (-inf, +inf) range won't be extended by insertion of (x,
+ * +inf) range. It's a less desirable case than insertion to
+ * (y, +inf) original range without extension, because in that
+ * case original range is narrower. But we can't express that
+ * in single float value.
*/
*penalty = 0.0;
}
@@ -434,8 +435,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (range_cmp_bounds(typcache, &new_lower, &orig_lower) < 0)
{
/*
- * Get extension of original range using subtype_diff.
- * Use constant if subtype_diff unavailable.
+ * Get extension of original range using subtype_diff. Use
+ * constant if subtype_diff unavailable.
*/
if (has_subtype_diff)
*penalty = call_subtype_diff(typcache,
@@ -454,8 +455,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else
{
/*
- * If upper bound of original range is not +inf, then extension
- * of it is infinity.
+ * If upper bound of original range is not +inf, then extension of
+ * it is infinity.
*/
*penalty = get_float4_infinity();
}
@@ -506,7 +507,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
/*
* The GiST PickSplit method for ranges
*
- * Primarily, we try to segregate ranges of different classes. If splitting
+ * Primarily, we try to segregate ranges of different classes. If splitting
* ranges of the same class, use the appropriate split method for that class.
*/
Datum
@@ -541,7 +542,7 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
memset(count_in_classes, 0, sizeof(count_in_classes));
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
count_in_classes[get_gist_range_class(range)]++;
}
@@ -597,7 +598,7 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
* To which side of the split should each class go? Initialize them
* all to go to the left side.
*/
- SplitLR classes_groups[CLS_COUNT];
+ SplitLR classes_groups[CLS_COUNT];
memset(classes_groups, 0, sizeof(classes_groups));
@@ -610,16 +611,18 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
{
/*----------
* Try to split classes in one of two ways:
- * 1) containing infinities - not containing infinities
- * 2) containing empty - not containing empty
+ * 1) containing infinities - not containing infinities
+ * 2) containing empty - not containing empty
*
* Select the way which balances the ranges between left and right
* the best. If split in these ways is not possible, there are at
* most 3 classes, so just separate biggest class.
*----------
*/
- int infCount, nonInfCount;
- int emptyCount, nonEmptyCount;
+ int infCount,
+ nonInfCount;
+ int emptyCount,
+ nonEmptyCount;
nonInfCount =
count_in_classes[CLS_NORMAL] +
@@ -628,7 +631,7 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
infCount = total_count - nonInfCount;
nonEmptyCount =
- count_in_classes[CLS_NORMAL] +
+ count_in_classes[CLS_NORMAL] +
count_in_classes[CLS_LOWER_INF] +
count_in_classes[CLS_UPPER_INF] +
count_in_classes[CLS_LOWER_INF | CLS_UPPER_INF];
@@ -638,21 +641,22 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
(Abs(infCount - nonInfCount) <=
Abs(emptyCount - nonEmptyCount)))
{
- classes_groups[CLS_NORMAL] = SPLIT_RIGHT;
+ classes_groups[CLS_NORMAL] = SPLIT_RIGHT;
classes_groups[CLS_CONTAIN_EMPTY] = SPLIT_RIGHT;
- classes_groups[CLS_EMPTY] = SPLIT_RIGHT;
+ classes_groups[CLS_EMPTY] = SPLIT_RIGHT;
}
else if (emptyCount > 0 && nonEmptyCount > 0)
{
- classes_groups[CLS_NORMAL] = SPLIT_RIGHT;
- classes_groups[CLS_LOWER_INF] = SPLIT_RIGHT;
- classes_groups[CLS_UPPER_INF] = SPLIT_RIGHT;
+ classes_groups[CLS_NORMAL] = SPLIT_RIGHT;
+ classes_groups[CLS_LOWER_INF] = SPLIT_RIGHT;
+ classes_groups[CLS_UPPER_INF] = SPLIT_RIGHT;
classes_groups[CLS_LOWER_INF | CLS_UPPER_INF] = SPLIT_RIGHT;
}
else
{
/*
- * Either total_count == emptyCount or total_count == infCount.
+ * Either total_count == emptyCount or total_count ==
+ * infCount.
*/
classes_groups[biggest_class] = SPLIT_RIGHT;
}
@@ -673,10 +677,10 @@ range_gist_same(PG_FUNCTION_ARGS)
bool *result = (bool *) PG_GETARG_POINTER(2);
/*
- * range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to
- * check that for ourselves. More generally, if the entries have been
- * properly normalized, then unequal flags bytes must mean unequal ranges
- * ... so let's just test all the flag bits at once.
+ * range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to check
+ * that for ourselves. More generally, if the entries have been properly
+ * normalized, then unequal flags bytes must mean unequal ranges ... so
+ * let's just test all the flag bits at once.
*/
if (range_get_flags(r1) != range_get_flags(r2))
*result = false;
@@ -710,7 +714,7 @@ range_gist_same(PG_FUNCTION_ARGS)
* that *all* unions formed within the GiST index must go through here.
*/
static RangeType *
-range_super_union(TypeCacheEntry *typcache, RangeType * r1, RangeType * r2)
+range_super_union(TypeCacheEntry *typcache, RangeType *r1, RangeType *r2)
{
RangeType *result;
RangeBound lower1,
@@ -862,9 +866,10 @@ range_gist_consistent_int(FmgrInfo *flinfo, StrategyNumber strategy,
proc = range_contains;
break;
case RANGESTRAT_CONTAINED_BY:
+
/*
* Empty ranges are contained by anything, so if key is or
- * contains any empty ranges, we must descend into it. Otherwise,
+ * contains any empty ranges, we must descend into it. Otherwise,
* descend only if key overlaps the query.
*/
if (RangeIsOrContainsEmpty(key))
@@ -875,6 +880,7 @@ range_gist_consistent_int(FmgrInfo *flinfo, StrategyNumber strategy,
proc = range_contains_elem;
break;
case RANGESTRAT_EQ:
+
/*
* If query is empty, descend only if the key is or contains any
* empty ranges. Otherwise, descend if key contains query.
@@ -959,9 +965,11 @@ range_gist_fallback_split(TypeCacheEntry *typcache,
GistEntryVector *entryvec,
GIST_SPLITVEC *v)
{
- RangeType *left_range = NULL;
- RangeType *right_range = NULL;
- OffsetNumber i, maxoff, split_idx;
+ RangeType *left_range = NULL;
+ RangeType *right_range = NULL;
+ OffsetNumber i,
+ maxoff,
+ split_idx;
maxoff = entryvec->n - 1;
/* Split entries before this to left page, after to right: */
@@ -971,7 +979,7 @@ range_gist_fallback_split(TypeCacheEntry *typcache,
v->spl_nright = 0;
for (i = FirstOffsetNumber; i <= maxoff; i++)
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
if (i < split_idx)
PLACE_LEFT(range, i);
@@ -996,9 +1004,10 @@ range_gist_class_split(TypeCacheEntry *typcache,
GIST_SPLITVEC *v,
SplitLR *classes_groups)
{
- RangeType *left_range = NULL;
- RangeType *right_range = NULL;
- OffsetNumber i, maxoff;
+ RangeType *left_range = NULL;
+ RangeType *right_range = NULL;
+ OffsetNumber i,
+ maxoff;
maxoff = entryvec->n - 1;
@@ -1006,8 +1015,8 @@ range_gist_class_split(TypeCacheEntry *typcache,
v->spl_nright = 0;
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
- int class;
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ int class;
/* Get class of range */
class = get_gist_range_class(range);
@@ -1038,10 +1047,12 @@ range_gist_single_sorting_split(TypeCacheEntry *typcache,
GIST_SPLITVEC *v,
bool use_upper_bound)
{
- SingleBoundSortItem *sortItems;
- RangeType *left_range = NULL;
- RangeType *right_range = NULL;
- OffsetNumber i, maxoff, split_idx;
+ SingleBoundSortItem *sortItems;
+ RangeType *left_range = NULL;
+ RangeType *right_range = NULL;
+ OffsetNumber i,
+ maxoff,
+ split_idx;
maxoff = entryvec->n - 1;
@@ -1053,9 +1064,9 @@ range_gist_single_sorting_split(TypeCacheEntry *typcache,
*/
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
- RangeBound bound2;
- bool empty;
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ RangeBound bound2;
+ bool empty;
sortItems[i - 1].index = i;
/* Put appropriate bound into array */
@@ -1078,8 +1089,8 @@ range_gist_single_sorting_split(TypeCacheEntry *typcache,
for (i = 0; i < maxoff; i++)
{
- int idx = sortItems[i].index;
- RangeType *range = DatumGetRangeType(entryvec->vector[idx].key);
+ int idx = sortItems[i].index;
+ RangeType *range = DatumGetRangeType(entryvec->vector[idx].key);
if (i < split_idx)
PLACE_LEFT(range, idx);
@@ -1125,16 +1136,20 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
GIST_SPLITVEC *v)
{
ConsiderSplitContext context;
- OffsetNumber i, maxoff;
- RangeType *range,
- *left_range = NULL,
- *right_range = NULL;
- int common_entries_count;
+ OffsetNumber i,
+ maxoff;
+ RangeType *range,
+ *left_range = NULL,
+ *right_range = NULL;
+ int common_entries_count;
NonEmptyRange *by_lower,
- *by_upper;
+ *by_upper;
CommonEntry *common_entries;
- int nentries, i1, i2;
- RangeBound *right_lower, *left_upper;
+ int nentries,
+ i1,
+ i2;
+ RangeBound *right_lower,
+ *left_upper;
memset(&context, 0, sizeof(ConsiderSplitContext));
context.typcache = typcache;
@@ -1151,8 +1166,8 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
/* Fill arrays of bounds */
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
- bool empty;
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ bool empty;
range_deserialize(typcache, range,
&by_lower[i - FirstOffsetNumber].lower,
@@ -1209,7 +1224,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
i1 = 0;
i2 = 0;
right_lower = &by_lower[i1].lower;
- left_upper = &by_upper[i2].lower;
+ left_upper = &by_upper[i2].lower;
while (true)
{
/*
@@ -1229,8 +1244,8 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
right_lower = &by_lower[i1].lower;
/*
- * Find count of ranges which anyway should be placed to the
- * left group.
+ * Find count of ranges which anyway should be placed to the left
+ * group.
*/
while (i2 < nentries &&
range_cmp_bounds(typcache, &by_upper[i2].upper,
@@ -1244,13 +1259,13 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
}
/*
- * Iterate over upper bound of left group finding greatest possible
- * lower bound of right group.
+ * Iterate over upper bound of left group finding greatest possible lower
+ * bound of right group.
*/
i1 = nentries - 1;
i2 = nentries - 1;
right_lower = &by_lower[i1].upper;
- left_upper = &by_upper[i2].upper;
+ left_upper = &by_upper[i2].upper;
while (true)
{
/*
@@ -1270,8 +1285,8 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
left_upper = &by_upper[i2].upper;
/*
- * Find count of intervals which anyway should be placed to the
- * right group.
+ * Find count of intervals which anyway should be placed to the right
+ * group.
*/
while (i1 >= 0 &&
range_cmp_bounds(typcache, &by_lower[i1].lower,
@@ -1295,9 +1310,9 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
}
/*
- * Ok, we have now selected bounds of the groups. Now we have to distribute
- * entries themselves. At first we distribute entries which can be placed
- * unambiguously and collect "common entries" to array.
+ * Ok, we have now selected bounds of the groups. Now we have to
+ * distribute entries themselves. At first we distribute entries which can
+ * be placed unambiguously and collect "common entries" to array.
*/
/* Allocate vectors for results */
@@ -1394,7 +1409,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
*/
for (i = 0; i < common_entries_count; i++)
{
- int idx = common_entries[i].index;
+ int idx = common_entries[i].index;
range = DatumGetRangeType(entryvec->vector[idx].key);
@@ -1530,8 +1545,8 @@ get_gist_range_class(RangeType *range)
static int
single_bound_cmp(const void *a, const void *b, void *arg)
{
- SingleBoundSortItem *i1 = (SingleBoundSortItem *) a;
- SingleBoundSortItem *i2 = (SingleBoundSortItem *) b;
+ SingleBoundSortItem *i1 = (SingleBoundSortItem *) a;
+ SingleBoundSortItem *i2 = (SingleBoundSortItem *) b;
TypeCacheEntry *typcache = (TypeCacheEntry *) arg;
return range_cmp_bounds(typcache, &i1->bound, &i2->bound);
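
The rangetypes_gist.c comments rewrapped above describe the penalty calculation: the cost of inserting a new range is how far the original range's bounds must be stretched, measured with subtype_diff when the range type provides one and with a constant otherwise. A minimal stand-alone sketch of that idea, assuming a one-dimensional interval with finite double bounds (Interval1D and interval_penalty are illustrative names, not backend symbols):

    #include <stdio.h>

    /* Hypothetical interval with finite double bounds. */
    typedef struct
    {
        double lower;
        double upper;
    } Interval1D;

    /*
     * Penalty for inserting "newr" into "orig": the total amount by which
     * orig's bounds must be extended.  The backend uses the opclass
     * subtype_diff function (or a fixed constant) instead of plain
     * subtraction, and also handles empty ranges and infinite bounds.
     */
    static double
    interval_penalty(const Interval1D *orig, const Interval1D *newr)
    {
        double penalty = 0.0;

        if (newr->upper > orig->upper)
            penalty += newr->upper - orig->upper;
        if (newr->lower < orig->lower)
            penalty += orig->lower - newr->lower;
        return penalty;
    }

    int
    main(void)
    {
        Interval1D orig = {0.0, 10.0};
        Interval1D newr = {8.0, 15.0};

        printf("penalty = %g\n", interval_penalty(&orig, &newr));  /* prints 5 */
        return 0;
    }
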
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 7ad99a0ec32..9ca3b9d0c44 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -73,7 +73,7 @@
#define PRETTYFLAG_PAREN 1
#define PRETTYFLAG_INDENT 2
-#define PRETTY_WRAP_DEFAULT 79
+#define PRETTY_WRAP_DEFAULT 79
/* macro to test if pretty action needed */
#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN)
@@ -138,7 +138,7 @@ static SPIPlanPtr plan_getrulebyoid = NULL;
static const char *query_getrulebyoid = "SELECT * FROM pg_catalog.pg_rewrite WHERE oid = $1";
static SPIPlanPtr plan_getviewrule = NULL;
static const char *query_getviewrule = "SELECT * FROM pg_catalog.pg_rewrite WHERE ev_class = $1 AND rulename = $2";
-static int pretty_wrap = PRETTY_WRAP_DEFAULT;
+static int pretty_wrap = PRETTY_WRAP_DEFAULT;
/* GUC parameters */
bool quote_all_identifiers = false;
@@ -388,9 +388,9 @@ pg_get_viewdef_wrap(PG_FUNCTION_ARGS)
{
/* By OID */
Oid viewoid = PG_GETARG_OID(0);
- int wrap = PG_GETARG_INT32(1);
+ int wrap = PG_GETARG_INT32(1);
int prettyFlags;
- char *result;
+ char *result;
/* calling this implies we want pretty printing */
prettyFlags = PRETTYFLAG_PAREN | PRETTYFLAG_INDENT;
@@ -1335,10 +1335,10 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
* Now emit the constraint definition, adding NO INHERIT if
* necessary.
*
- * There are cases where
- * the constraint expression will be fully parenthesized and
- * we don't need the outer parens ... but there are other
- * cases where we do need 'em. Be conservative for now.
+ * There are cases where the constraint expression will be
+ * fully parenthesized and we don't need the outer parens ...
+ * but there are other cases where we do need 'em. Be
+ * conservative for now.
*
* Note that simply checking for leading '(' and trailing ')'
* would NOT be good enough, consider "(x > 0) AND (y > 0)".
@@ -1599,7 +1599,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
SysScanDesc scan;
HeapTuple tup;
- /* Look up table name. Can't lock it - we might not have privileges. */
+ /* Look up table name. Can't lock it - we might not have privileges. */
tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename));
tableOid = RangeVarGetRelid(tablerv, NoLock, false);
@@ -3038,7 +3038,7 @@ get_target_list(List *targetList, deparse_context *context,
char *sep;
int colno;
ListCell *l;
- bool last_was_multiline = false;
+ bool last_was_multiline = false;
sep = " ";
colno = 0;
@@ -3048,9 +3048,9 @@ get_target_list(List *targetList, deparse_context *context,
char *colname;
char *attname;
StringInfoData targetbuf;
- int leading_nl_pos = -1;
- char *trailing_nl;
- int pos;
+ int leading_nl_pos = -1;
+ char *trailing_nl;
+ int pos;
if (tle->resjunk)
continue; /* ignore junk entries */
@@ -3060,9 +3060,8 @@ get_target_list(List *targetList, deparse_context *context,
colno++;
/*
- * Put the new field spec into targetbuf so we can
- * decide after we've got it whether or not it needs
- * to go on a new line.
+ * Put the new field spec into targetbuf so we can decide after we've
+ * got it whether or not it needs to go on a new line.
*/
initStringInfo(&targetbuf);
@@ -3112,7 +3111,7 @@ get_target_list(List *targetList, deparse_context *context,
/* Does the new field start with whitespace plus a new line? */
- for (pos=0; pos < targetbuf.len; pos++)
+ for (pos = 0; pos < targetbuf.len; pos++)
{
if (targetbuf.data[pos] == '\n')
{
@@ -3123,30 +3122,29 @@ get_target_list(List *targetList, deparse_context *context,
break;
}
- /* Locate the start of the current line in the buffer */
+ /* Locate the start of the current line in the buffer */
- trailing_nl = (strrchr(buf->data,'\n'));
+ trailing_nl = (strrchr(buf->data, '\n'));
if (trailing_nl == NULL)
trailing_nl = buf->data;
- else
+ else
trailing_nl++;
/*
- * If the field we're adding is the first in the list, or it already
- * has a leading newline, or wrap mode is disabled (pretty_wrap < 0),
- * don't add anything.
- * Otherwise, add a newline, plus some indentation, if either the
- * new field would cause an overflow or the last field used more than
- * one line.
+ * If the field we're adding is the first in the list, or it already
+ * has a leading newline, or wrap mode is disabled (pretty_wrap < 0),
+ * don't add anything. Otherwise, add a newline, plus some
+ * indentation, if either the new field would cause an overflow or the
+ * last field used more than one line.
*/
if (colno > 1 &&
- leading_nl_pos == -1 &&
+ leading_nl_pos == -1 &&
pretty_wrap >= 0 &&
((strlen(trailing_nl) + strlen(targetbuf.data) > pretty_wrap) ||
last_was_multiline))
{
- appendContextKeyword(context, "", -PRETTYINDENT_STD,
+ appendContextKeyword(context, "", -PRETTYINDENT_STD,
PRETTYINDENT_STD, PRETTYINDENT_VAR);
}
@@ -3157,12 +3155,12 @@ get_target_list(List *targetList, deparse_context *context,
/* Keep track of this field's status for next iteration */
- last_was_multiline =
- (strchr(targetbuf.data + leading_nl_pos + 1,'\n') != NULL);
+ last_was_multiline =
+ (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL);
/* cleanup */
- pfree (targetbuf.data);
+ pfree(targetbuf.data);
}
}
@@ -4049,7 +4047,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
- * Get the name of a field of an expression of composite type. The
+ * Get the name of a field of an expression of composite type. The
* expression is usually a Var, but we handle other cases too.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
@@ -4059,7 +4057,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
* could also be RECORD. Since no actual table or view column is allowed to
* have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE
* or to a subquery output. We drill down to find the ultimate defining
- * expression and attempt to infer the field name from it. We ereport if we
+ * expression and attempt to infer the field name from it. We ereport if we
* can't determine the name.
*
* Similarly, a PARAM of type RECORD has to refer to some expression of
@@ -4483,7 +4481,7 @@ find_rte_by_refname(const char *refname, deparse_context *context)
* reference a parameter supplied by an upper NestLoop or SubPlan plan node.
*
* If successful, return the expression and set *dpns_p and *ancestor_cell_p
- * appropriately for calling push_ancestor_plan(). If no referent can be
+ * appropriately for calling push_ancestor_plan(). If no referent can be
* found, return NULL.
*/
static Node *
@@ -4615,7 +4613,7 @@ get_parameter(Param *param, deparse_context *context)
/*
* If it's a PARAM_EXEC parameter, try to locate the expression from which
- * the parameter was computed. Note that failing to find a referent isn't
+ * the parameter was computed. Note that failing to find a referent isn't
* an error, since the Param might well be a subplan output rather than an
* input.
*/
@@ -6567,10 +6565,10 @@ get_from_clause(Query *query, const char *prefix, deparse_context *context)
else
{
StringInfoData targetbuf;
- char *trailing_nl;
+ char *trailing_nl;
appendStringInfoString(buf, ", ");
-
+
initStringInfo(&targetbuf);
context->buf = &targetbuf;
@@ -6578,33 +6576,33 @@ get_from_clause(Query *query, const char *prefix, deparse_context *context)
context->buf = buf;
- /* Locate the start of the current line in the buffer */
+ /* Locate the start of the current line in the buffer */
- trailing_nl = (strrchr(buf->data,'\n'));
+ trailing_nl = (strrchr(buf->data, '\n'));
if (trailing_nl == NULL)
trailing_nl = buf->data;
- else
+ else
trailing_nl++;
-
+
/*
- * Add a newline, plus some indentation, if pretty_wrap is on and the
- * new from-clause item would cause an overflow.
+ * Add a newline, plus some indentation, if pretty_wrap is on and
+ * the new from-clause item would cause an overflow.
*/
-
+
if (pretty_wrap >= 0 &&
(strlen(trailing_nl) + strlen(targetbuf.data) > pretty_wrap))
{
- appendContextKeyword(context, "", -PRETTYINDENT_STD,
+ appendContextKeyword(context, "", -PRETTYINDENT_STD,
PRETTYINDENT_STD, PRETTYINDENT_VAR);
}
/* Add the new item */
appendStringInfoString(buf, targetbuf.data);
-
+
/* cleanup */
- pfree (targetbuf.data);
+ pfree(targetbuf.data);
}
}
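
The get_target_list() and get_from_clause() hunks above both hinge on the same wrap test: locate the start of the current output line, and break before the next item if the line plus the item would pass the pretty_wrap column (wrapping is off when pretty_wrap < 0). A self-contained sketch of just that test, using a plain char buffer in place of StringInfo (needs_wrap is an illustrative name):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Decide whether the next deparsed item should start on a new line:
     * true when wrapping is enabled and the current line, plus the item,
     * would exceed the wrap column.
     */
    static bool
    needs_wrap(const char *buf, const char *next_item, int pretty_wrap)
    {
        const char *trailing_nl;

        if (pretty_wrap < 0)
            return false;           /* wrap mode disabled */

        /* Locate the start of the current line in the buffer. */
        trailing_nl = strrchr(buf, '\n');
        if (trailing_nl == NULL)
            trailing_nl = buf;
        else
            trailing_nl++;

        return strlen(trailing_nl) + strlen(next_item) > (size_t) pretty_wrap;
    }

    int
    main(void)
    {
        const char *sofar = "SELECT a, b,\n       c, d,";

        printf("%d\n", needs_wrap(sofar, " very_long_expression_alias", 20));
        return 0;
    }
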
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 83e43a99972..95e46276f0a 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -258,7 +258,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -393,7 +393,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -1743,8 +1743,8 @@ scalararraysel(PlannerInfo *root,
}
/*
- * If it is equality or inequality, we might be able to estimate this as
- * a form of array containment; for instance "const = ANY(column)" can be
+ * If it is equality or inequality, we might be able to estimate this as a
+ * form of array containment; for instance "const = ANY(column)" can be
* treated as "ARRAY[const] <@ column". scalararraysel_containment tries
* that, and returns the selectivity estimate if successful, or -1 if not.
*/
@@ -1819,7 +1819,7 @@ scalararraysel(PlannerInfo *root,
/*
* For generic operators, we assume the probability of success is
- * independent for each array element. But for "= ANY" or "<> ALL",
+ * independent for each array element. But for "= ANY" or "<> ALL",
* if the array elements are distinct (which'd typically be the case)
* then the probabilities are disjoint, and we should just sum them.
*
@@ -2132,6 +2132,7 @@ eqjoinsel(PG_FUNCTION_ARGS)
break;
case JOIN_SEMI:
case JOIN_ANTI:
+
/*
* Look up the join's inner relation. min_righthand is sufficient
* information because neither SEMI nor ANTI joins permit any
@@ -2423,7 +2424,7 @@ eqjoinsel_semi(Oid operator,
/*
* We clamp nd2 to be not more than what we estimate the inner relation's
- * size to be. This is intuitively somewhat reasonable since obviously
+ * size to be. This is intuitively somewhat reasonable since obviously
* there can't be more than that many distinct values coming from the
* inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
* likewise) is that this is the only pathway by which restriction clauses
@@ -3879,7 +3880,7 @@ convert_string_datum(Datum value, Oid typid)
{
char *xfrmstr;
size_t xfrmlen;
- size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
+ size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
/*
* Note: originally we guessed at a suitable output buffer size, and
@@ -4475,7 +4476,7 @@ examine_simple_variable(PlannerInfo *root, Var *var,
* Punt if subquery uses set operations or GROUP BY, as these will
* mash underlying columns' stats beyond recognition. (Set ops are
* particularly nasty; if we forged ahead, we would return stats
- * relevant to only the leftmost subselect...) DISTINCT is also
+ * relevant to only the leftmost subselect...) DISTINCT is also
* problematic, but we check that later because there is a possibility
* of learning something even with it.
*/
@@ -4496,12 +4497,12 @@ examine_simple_variable(PlannerInfo *root, Var *var,
Assert(rel->subroot && IsA(rel->subroot, PlannerInfo));
/*
- * Switch our attention to the subquery as mangled by the planner.
- * It was okay to look at the pre-planning version for the tests
- * above, but now we need a Var that will refer to the subroot's
- * live RelOptInfos. For instance, if any subquery pullup happened
- * during planning, Vars in the targetlist might have gotten replaced,
- * and we need to see the replacement expressions.
+ * Switch our attention to the subquery as mangled by the planner. It
+ * was okay to look at the pre-planning version for the tests above,
+ * but now we need a Var that will refer to the subroot's live
+ * RelOptInfos. For instance, if any subquery pullup happened during
+ * planning, Vars in the targetlist might have gotten replaced, and we
+ * need to see the replacement expressions.
*/
subquery = rel->subroot->parse;
Assert(IsA(subquery, Query));
@@ -4530,13 +4531,13 @@ examine_simple_variable(PlannerInfo *root, Var *var,
/*
* If the sub-query originated from a view with the security_barrier
- * attribute, we must not look at the variable's statistics, though
- * it seems all right to notice the existence of a DISTINCT clause.
- * So stop here.
+ * attribute, we must not look at the variable's statistics, though it
+ * seems all right to notice the existence of a DISTINCT clause. So
+ * stop here.
*
* This is probably a harsher restriction than necessary; it's
* certainly OK for the selectivity estimator (which is a C function,
- * and therefore omnipotent anyway) to look at the statistics. But
+ * and therefore omnipotent anyway) to look at the statistics. But
* many selectivity estimators will happily *invoke the operator
* function* to try to work out a good estimate - and that's not OK.
* So for now, don't dig down for stats.
@@ -4563,7 +4564,7 @@ examine_simple_variable(PlannerInfo *root, Var *var,
/*
* Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE. (We
* won't see RTE_JOIN here because join alias Vars have already been
- * flattened.) There's not much we can do with function outputs, but
+ * flattened.) There's not much we can do with function outputs, but
* maybe someday try to be smarter about VALUES and/or CTEs.
*/
}
@@ -4679,8 +4680,8 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
/*
* With no data, estimate ndistinct = ntuples if the table is small, else
- * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small"
- * so that the behavior isn't discontinuous.
+ * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
+ * that the behavior isn't discontinuous.
*/
if (ntuples < DEFAULT_NUM_DISTINCT)
return ntuples;
@@ -6094,16 +6095,16 @@ string_to_bytea_const(const char *str, size_t str_len)
* ANDing the index predicate with the explicitly given indexquals produces
* a more accurate idea of the index's selectivity. However, we need to be
* careful not to insert redundant clauses, because clauselist_selectivity()
- * is easily fooled into computing a too-low selectivity estimate. Our
+ * is easily fooled into computing a too-low selectivity estimate. Our
* approach is to add only the predicate clause(s) that cannot be proven to
- * be implied by the given indexquals. This successfully handles cases such
+ * be implied by the given indexquals. This successfully handles cases such
* as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
* There are many other cases where we won't detect redundancy, leading to a
* too-low selectivity estimate, which will bias the system in favor of using
- * partial indexes where possible. That is not necessarily bad though.
+ * partial indexes where possible. That is not necessarily bad though.
*
* Note that indexQuals contains RestrictInfo nodes while the indpred
- * does not, so the output list will be mixed. This is OK for both
+ * does not, so the output list will be mixed. This is OK for both
* predicate_implied_by() and clauselist_selectivity(), but might be
* problematic if the result were passed to other things.
*/
@@ -6392,7 +6393,7 @@ btcostestimate(PG_FUNCTION_ARGS)
* the index scan). Additional quals can suppress visits to the heap, so
* it's OK to count them in indexSelectivity, but they should not count
* for estimating numIndexTuples. So we must examine the given indexquals
- * to find out which ones count as boundary quals. We rely on the
+ * to find out which ones count as boundary quals. We rely on the
* knowledge that they are given in index column order.
*
* For a RowCompareExpr, we consider only the first column, just as
@@ -6531,8 +6532,8 @@ btcostestimate(PG_FUNCTION_ARGS)
/*
* If the index is partial, AND the index predicate with the
- * index-bound quals to produce a more accurate idea of the number
- * of rows covered by the bound conditions.
+ * index-bound quals to produce a more accurate idea of the number of
+ * rows covered by the bound conditions.
*/
selectivityQuals = add_predicate_to_quals(index, indexBoundQuals);
@@ -6767,17 +6768,17 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
int32 i;
/*
- * Get the operator's strategy number and declared input data types
- * within the index opfamily. (We don't need the latter, but we use
- * get_op_opfamily_properties because it will throw error if it fails
- * to find a matching pg_amop entry.)
+ * Get the operator's strategy number and declared input data types within
+ * the index opfamily. (We don't need the latter, but we use
+ * get_op_opfamily_properties because it will throw error if it fails to
+ * find a matching pg_amop entry.)
*/
get_op_opfamily_properties(clause_op, index->opfamily[indexcol], false,
&strategy_op, &lefttype, &righttype);
/*
- * GIN always uses the "default" support functions, which are those
- * with lefttype == righttype == the opclass' opcintype (see
+ * GIN always uses the "default" support functions, which are those with
+ * lefttype == righttype == the opclass' opcintype (see
* IndexSupportInitialize in relcache.c).
*/
extractProcOid = get_opfamily_proc(index->opfamily[indexcol],
@@ -6864,7 +6865,7 @@ gincost_opexpr(IndexOptInfo *index, OpExpr *clause, GinQualCounts *counts)
else
{
elog(ERROR, "could not match index to operand");
- operand = NULL; /* keep compiler quiet */
+ operand = NULL; /* keep compiler quiet */
}
if (IsA(operand, RelabelType))
@@ -6872,8 +6873,8 @@ gincost_opexpr(IndexOptInfo *index, OpExpr *clause, GinQualCounts *counts)
/*
* It's impossible to call extractQuery method for unknown operand. So
- * unless operand is a Const we can't do much; just assume there will
- * be one ordinary search entry from the operand at runtime.
+ * unless operand is a Const we can't do much; just assume there will be
+ * one ordinary search entry from the operand at runtime.
*/
if (!IsA(operand, Const))
{
@@ -6901,7 +6902,7 @@ gincost_opexpr(IndexOptInfo *index, OpExpr *clause, GinQualCounts *counts)
* each of which involves one value from the RHS array, plus all the
* non-array quals (if any). To model this, we average the counts across
* the RHS elements, and add the averages to the counts in *counts (which
- * correspond to per-indexscan costs). We also multiply counts->arrayScans
+ * correspond to per-indexscan costs). We also multiply counts->arrayScans
* by N, causing gincostestimate to scale up its estimates accordingly.
*/
static bool
@@ -6935,9 +6936,9 @@ gincost_scalararrayopexpr(IndexOptInfo *index, ScalarArrayOpExpr *clause,
/*
* It's impossible to call extractQuery method for unknown operand. So
- * unless operand is a Const we can't do much; just assume there will
- * be one ordinary search entry from each array entry at runtime, and
- * fall back on a probably-bad estimate of the number of array entries.
+ * unless operand is a Const we can't do much; just assume there will be
+ * one ordinary search entry from each array entry at runtime, and fall
+ * back on a probably-bad estimate of the number of array entries.
*/
if (!IsA(rightop, Const))
{
@@ -7156,7 +7157,7 @@ gincostestimate(PG_FUNCTION_ARGS)
else if (IsA(clause, ScalarArrayOpExpr))
{
matchPossible = gincost_scalararrayopexpr(index,
- (ScalarArrayOpExpr *) clause,
+ (ScalarArrayOpExpr *) clause,
numEntries,
&counts);
if (!matchPossible)
@@ -7194,7 +7195,8 @@ gincostestimate(PG_FUNCTION_ARGS)
outer_scans = loop_count;
/*
- * Compute cost to begin scan, first of all, pay attention to pending list.
+ * Compute cost to begin scan, first of all, pay attention to pending
+ * list.
*/
entryPagesFetched = numPendingPages;
@@ -7247,7 +7249,8 @@ gincostestimate(PG_FUNCTION_ARGS)
*indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
/*
- * Now we compute the number of data pages fetched while the scan proceeds.
+ * Now we compute the number of data pages fetched while the scan
+ * proceeds.
*/
/* data pages scanned for each exact (non-partial) matched entry */
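
Among the selfuncs.c comments above, the scalararraysel() one states two combination rules for ANY over an array: generic operators assume the per-element probabilities are independent, while "= ANY" with distinct elements treats them as disjoint and simply sums them. A small illustration of the difference, with made-up selectivities (the function names are not backend symbols):

    #include <stdio.h>

    /* Combine per-element selectivities assuming independent events. */
    static double
    any_sel_independent(const double *s, int n)
    {
        double fail = 1.0;

        for (int i = 0; i < n; i++)
            fail *= 1.0 - s[i];
        return 1.0 - fail;
    }

    /* Combine per-element selectivities assuming disjoint events ("= ANY"). */
    static double
    any_sel_disjoint(const double *s, int n)
    {
        double sum = 0.0;

        for (int i = 0; i < n; i++)
            sum += s[i];
        return sum > 1.0 ? 1.0 : sum;   /* selectivities are clamped to 1 */
    }

    int
    main(void)
    {
        double s[] = {0.01, 0.02, 0.03};

        printf("independent: %.6f\n", any_sel_independent(s, 3));
        printf("disjoint:    %.6f\n", any_sel_disjoint(s, 3));
        return 0;
    }
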
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index a3e1e94a2b2..8593b6b47f4 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -757,7 +757,7 @@ interval_send(PG_FUNCTION_ARGS)
/*
* The interval typmod stores a "range" in its high 16 bits and a "precision"
- * in its low 16 bits. Both contribute to defining the resolution of the
+ * in its low 16 bits. Both contribute to defining the resolution of the
* type. Range addresses resolution granules larger than one second, and
* precision specifies resolution below one second. This representation can
* express all SQL standard resolutions, but we implement them all in terms of
@@ -940,7 +940,7 @@ interval_transform(PG_FUNCTION_ARGS)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) &&!((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 old_typmod = exprTypmod(source);
@@ -965,7 +965,7 @@ interval_transform(PG_FUNCTION_ARGS)
/*
* Temporally-smaller fields occupy higher positions in the range
- * bitmap. Since only the temporally-smallest bit matters for length
+ * bitmap. Since only the temporally-smallest bit matters for length
* coercion purposes, we compare the last-set bits in the ranges.
* Precision, which is to say, sub-second precision, only affects
* ranges that include SECOND.
@@ -974,8 +974,8 @@ interval_transform(PG_FUNCTION_ARGS)
old_range_fls = fls(old_range);
if (new_typmod < 0 ||
((new_range_fls >= SECOND || new_range_fls >= old_range_fls) &&
- (old_range_fls < SECOND || new_precis >= MAX_INTERVAL_PRECISION ||
- new_precis >= old_precis)))
+ (old_range_fls < SECOND || new_precis >= MAX_INTERVAL_PRECISION ||
+ new_precis >= old_precis)))
ret = relabel_to_typmod(source, new_typmod);
}
@@ -1925,7 +1925,7 @@ timestamp_fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
timestamp_sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = timestamp_fastcmp;
PG_RETURN_VOID();
@@ -4067,11 +4067,11 @@ timestamp_part(PG_FUNCTION_ARGS)
{
case DTK_EPOCH:
#ifdef HAVE_INT64_TIMESTAMP
- result = (timestamp - SetEpochTimestamp()) / 1000000.0;
+ result = (timestamp - SetEpochTimestamp()) / 1000000.0;
#else
- result = timestamp - SetEpochTimestamp();
+ result = timestamp - SetEpochTimestamp();
#endif
- break;
+ break;
case DTK_DOW:
case DTK_ISODOW:
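
The interval_transform() comment above relies on the layout of the interval range bitmap: temporally smaller fields occupy higher bit positions, so comparing the last-set (most significant) bits of two range masks compares their finest granularity. A portable stand-in for fls() makes that concrete (my_fls and the example masks are illustrative; the real field bits are the DTK_M() values from datetime.h):

    #include <stdio.h>

    /* Position of the most significant set bit, 1-based; 0 if mask is 0. */
    static int
    my_fls(unsigned int mask)
    {
        int pos = 0;

        while (mask)
        {
            pos++;
            mask >>= 1;
        }
        return pos;
    }

    int
    main(void)
    {
        /* Hypothetical masks: bit 2 = a coarse field, bit 5 = a finer one. */
        unsigned int old_range = (1u << 2) | (1u << 5);
        unsigned int new_range = (1u << 2);

        /* A larger fls value means a temporally smaller finest field. */
        printf("old fls = %d, new fls = %d\n",
               my_fls(old_range), my_fls(new_range));
        return 0;
    }
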
diff --git a/src/backend/utils/adt/tsgistidx.c b/src/backend/utils/adt/tsgistidx.c
index b427586d185..674e48c871d 100644
--- a/src/backend/utils/adt/tsgistidx.c
+++ b/src/backend/utils/adt/tsgistidx.c
@@ -593,8 +593,8 @@ typedef struct
static int
comparecost(const void *va, const void *vb)
{
- const SPLITCOST *a = (const SPLITCOST *) va;
- const SPLITCOST *b = (const SPLITCOST *) vb;
+ const SPLITCOST *a = (const SPLITCOST *) va;
+ const SPLITCOST *b = (const SPLITCOST *) vb;
if (a->cost == b->cost)
return 0;
diff --git a/src/backend/utils/adt/tsquery_util.c b/src/backend/utils/adt/tsquery_util.c
index ae00f180b5d..0724d33c1d0 100644
--- a/src/backend/utils/adt/tsquery_util.c
+++ b/src/backend/utils/adt/tsquery_util.c
@@ -134,7 +134,7 @@ QTNodeCompare(QTNode *an, QTNode *bn)
static int
cmpQTN(const void *a, const void *b)
{
- return QTNodeCompare(*(QTNode * const *) a, *(QTNode * const *) b);
+ return QTNodeCompare(*(QTNode *const *) a, *(QTNode *const *) b);
}
void
diff --git a/src/backend/utils/adt/tsrank.c b/src/backend/utils/adt/tsrank.c
index 960233b6332..a45caf05af5 100644
--- a/src/backend/utils/adt/tsrank.c
+++ b/src/backend/utils/adt/tsrank.c
@@ -134,8 +134,8 @@ static int
compareQueryOperand(const void *a, const void *b, void *arg)
{
char *operand = (char *) arg;
- QueryOperand *qa = (*(QueryOperand * const *) a);
- QueryOperand *qb = (*(QueryOperand * const *) b);
+ QueryOperand *qa = (*(QueryOperand *const *) a);
+ QueryOperand *qb = (*(QueryOperand *const *) b);
return tsCompareString(operand + qa->distance, qa->length,
operand + qb->distance, qb->length,
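
The cmpQTN() and compareQueryOperand() hunks above only change the spelling of the pointer casts, but the pattern they use is worth spelling out: sorting an array of pointers, where each comparator argument is really a pointer to a const pointer. The same pattern on plain strings, self-contained with standard qsort():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* qsort comparator for an array of "const char *" elements. */
    static int
    cmp_string_ptr(const void *a, const void *b)
    {
        const char *sa = *(const char *const *) a;
        const char *sb = *(const char *const *) b;

        return strcmp(sa, sb);
    }

    int
    main(void)
    {
        const char *words[] = {"tsrank", "gist", "operand"};

        qsort(words, 3, sizeof(words[0]), cmp_string_ptr);
        for (int i = 0; i < 3; i++)
            printf("%s\n", words[i]);
        return 0;
    }
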
diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c
index bab6534feaa..eb5c45b3d81 100644
--- a/src/backend/utils/adt/tsvector_op.c
+++ b/src/backend/utils/adt/tsvector_op.c
@@ -373,9 +373,9 @@ tsvector_concat(PG_FUNCTION_ARGS)
i2 = in2->size;
/*
- * Conservative estimate of space needed. We might need all the data
- * in both inputs, and conceivably add a pad byte before position data
- * for each item where there was none before.
+ * Conservative estimate of space needed. We might need all the data in
+ * both inputs, and conceivably add a pad byte before position data for
+ * each item where there was none before.
*/
output_bytes = VARSIZE(in1) + VARSIZE(in2) + i1 + i2;
diff --git a/src/backend/utils/adt/varbit.c b/src/backend/utils/adt/varbit.c
index e74e062338d..2bcf5b8aa8c 100644
--- a/src/backend/utils/adt/varbit.c
+++ b/src/backend/utils/adt/varbit.c
@@ -664,7 +664,7 @@ varbit_transform(PG_FUNCTION_ARGS)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) &&!((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 new_typmod = DatumGetInt32(((Const *) typmod)->constvalue);
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 199330cef29..4cdb88837ba 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -561,7 +561,7 @@ varchar_transform(PG_FUNCTION_ARGS)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) &&!((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 old_typmod = exprTypmod(source);
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 53989d1ecb3..e1b57ba3fc7 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -1353,6 +1353,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
char a2buf[STACKBUFLEN];
char *a1p,
*a2p;
+
#ifdef HAVE_LOCALE_T
pg_locale_t mylocale = 0;
#endif
@@ -1413,8 +1414,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
(LPWSTR) a1p, a1len / 2);
if (!r)
ereport(ERROR,
- (errmsg("could not convert string to UTF-16: error code %lu",
- GetLastError())));
+ (errmsg("could not convert string to UTF-16: error code %lu",
+ GetLastError())));
}
((LPWSTR) a1p)[r] = 0;
@@ -1426,8 +1427,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
(LPWSTR) a2p, a2len / 2);
if (!r)
ereport(ERROR,
- (errmsg("could not convert string to UTF-16: error code %lu",
- GetLastError())));
+ (errmsg("could not convert string to UTF-16: error code %lu",
+ GetLastError())));
}
((LPWSTR) a2p)[r] = 0;
@@ -4001,7 +4002,7 @@ text_format_string_conversion(StringInfo buf, char conversion,
else if (conversion == 'I')
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("null values cannot be formatted as an SQL identifier")));
+ errmsg("null values cannot be formatted as an SQL identifier")));
return;
}
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index c51a9f76ced..44d327d7606 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -126,8 +126,8 @@ static bool print_xml_decl(StringInfo buf, const xmlChar *version,
static xmlDocPtr xml_parse(text *data, XmlOptionType xmloption_arg,
bool preserve_whitespace, int encoding);
static text *xml_xmlnodetoxmltype(xmlNodePtr cur);
-static int xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
- ArrayBuildState **astate);
+static int xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
+ ArrayBuildState **astate);
#endif /* USE_LIBXML */
static StringInfo query_to_xml_internal(const char *query, char *tablename,
@@ -913,7 +913,7 @@ pg_xml_init_library(void)
* pg_xml_init --- set up for use of libxml and register an error handler
*
* This should be called by each function that is about to use libxml
- * facilities and requires error handling. It initializes libxml with
+ * facilities and requires error handling. It initializes libxml with
* pg_xml_init_library() and establishes our libxml error handler.
*
* strictness determines which errors are reported and which are ignored.
@@ -943,9 +943,9 @@ pg_xml_init(PgXmlStrictness strictness)
/*
* Save original error handler and install ours. libxml originally didn't
* distinguish between the contexts for generic and for structured error
- * handlers. If we're using an old libxml version, we must thus save
- * the generic error context, even though we're using a structured
- * error handler.
+ * handlers. If we're using an old libxml version, we must thus save the
+ * generic error context, even though we're using a structured error
+ * handler.
*/
errcxt->saved_errfunc = xmlStructuredError;
@@ -959,7 +959,7 @@ pg_xml_init(PgXmlStrictness strictness)
/*
* Verify that xmlSetStructuredErrorFunc set the context variable we
- * expected it to. If not, the error context pointer we just saved is not
+ * expected it to. If not, the error context pointer we just saved is not
* the correct thing to restore, and since that leaves us without a way to
* restore the context in pg_xml_done, we must fail.
*
@@ -1014,9 +1014,9 @@ pg_xml_done(PgXmlErrorContext *errcxt, bool isError)
Assert(!errcxt->err_occurred || isError);
/*
- * Check that libxml's global state is correct, warn if not. This is
- * a real test and not an Assert because it has a higher probability
- * of happening.
+ * Check that libxml's global state is correct, warn if not. This is a
+ * real test and not an Assert because it has a higher probability of
+ * happening.
*/
#ifdef HAVE_XMLSTRUCTUREDERRORCONTEXT
cur_errcxt = xmlStructuredErrorContext;
@@ -1108,7 +1108,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
int utf8len;
/*
- * Only initialize libxml. We don't need error handling here, but we do
+ * Only initialize libxml. We don't need error handling here, but we do
* need to make sure libxml is initialized before calling any of its
* functions. Note that this is safe (and a no-op) if caller has already
* done pg_xml_init().
@@ -1516,9 +1516,9 @@ xml_errorHandler(void *data, xmlErrorPtr error)
PgXmlErrorContext *xmlerrcxt = (PgXmlErrorContext *) data;
xmlParserCtxtPtr ctxt = (xmlParserCtxtPtr) error->ctxt;
xmlParserInputPtr input = (ctxt != NULL) ? ctxt->input : NULL;
- xmlNodePtr node = error->node;
+ xmlNodePtr node = error->node;
const xmlChar *name = (node != NULL &&
- node->type == XML_ELEMENT_NODE) ? node->name : NULL;
+ node->type == XML_ELEMENT_NODE) ? node->name : NULL;
int domain = error->domain;
int level = error->level;
StringInfo errorBuf;
@@ -1599,7 +1599,7 @@ xml_errorHandler(void *data, xmlErrorPtr error)
if (input != NULL)
{
xmlGenericErrorFunc errFuncSaved = xmlGenericError;
- void *errCtxSaved = xmlGenericErrorContext;
+ void *errCtxSaved = xmlGenericErrorContext;
xmlSetGenericErrorFunc((void *) errorBuf,
(xmlGenericErrorFunc) appendStringInfo);
@@ -1617,8 +1617,8 @@ xml_errorHandler(void *data, xmlErrorPtr error)
chopStringInfoNewlines(errorBuf);
/*
- * Legacy error handling mode. err_occurred is never set, we just add the
- * message to err_buf. This mode exists because the xml2 contrib module
+ * Legacy error handling mode. err_occurred is never set, we just add the
+ * message to err_buf. This mode exists because the xml2 contrib module
* uses our error-handling infrastructure, but we don't want to change its
* behaviour since it's deprecated anyway. This is also why we don't
* distinguish between notices, warnings and errors here --- the old-style
@@ -3574,7 +3574,7 @@ xml_xmlnodetoxmltype(xmlNodePtr cur)
PG_TRY();
{
/* Here we rely on XML having the same representation as TEXT */
- char *escaped = escape_xml((char *) str);
+ char *escaped = escape_xml((char *) str);
result = (xmltype *) cstring_to_text(escaped);
pfree(escaped);
@@ -3623,7 +3623,7 @@ xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
result = xpathobj->nodesetval->nodeNr;
if (astate != NULL)
{
- int i;
+ int i;
for (i = 0; i < result; i++)
{
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index ea3daa599ca..0307b9652d4 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -1637,8 +1637,8 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
/*
* If there are any out-of-line toasted fields in the tuple, expand them
- * in-line. This saves cycles during later use of the catcache entry,
- * and also protects us against the possibility of the toast tuples being
+ * in-line. This saves cycles during later use of the catcache entry, and
+ * also protects us against the possibility of the toast tuples being
* freed before we attempt to fetch them, in case of something using a
* slightly stale catcache entry.
*/
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index d5fe85abbff..9ccfc4f1144 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -820,7 +820,7 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
* since they'll not have seen our changed tuples anyway. We can forget
* about CurrentCmdInvalidMsgs too, since those changes haven't touched
* the caches yet.
- *
+ *
* In any case, reset the various lists to empty. We need not physically
* free memory here, since TopTransactionContext is about to be emptied
* anyway.
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 44dab822648..64b413bb6ae 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -283,7 +283,7 @@ get_sort_function_for_ordering_op(Oid opno, Oid *sortfunc,
opcintype,
opcintype,
BTORDER_PROC);
- if (!OidIsValid(*sortfunc)) /* should not happen */
+ if (!OidIsValid(*sortfunc)) /* should not happen */
elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
BTORDER_PROC, opcintype, opcintype, opfamily);
*issupport = false;
@@ -1549,7 +1549,7 @@ func_volatile(Oid funcid)
/*
* get_func_leakproof
- * Given procedure id, return the function's leakproof field.
+ * Given procedure id, return the function's leakproof field.
*/
bool
get_func_leakproof(Oid funcid)
@@ -2914,8 +2914,8 @@ get_range_subtype(Oid rangeOid)
tp = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(rangeOid));
if (HeapTupleIsValid(tp))
{
- Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp);
- Oid result;
+ Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp);
+ Oid result;
result = rngtup->rngsubtype;
ReleaseSysCache(tp);
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 6292f8dc6c9..c42765c25a7 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -11,7 +11,7 @@
* The logic for choosing generic or custom plans is in choose_custom_plan,
* which see for comments.
*
- * Cache invalidation is driven off sinval events. Any CachedPlanSource
+ * Cache invalidation is driven off sinval events. Any CachedPlanSource
* that matches the event is marked invalid, as is its generic CachedPlan
* if it has one. When (and if) the next demand for a cached plan occurs,
* parse analysis and rewrite is repeated to build a new valid query tree,
@@ -77,9 +77,9 @@ static void ReleaseGenericPlan(CachedPlanSource *plansource);
static List *RevalidateCachedQuery(CachedPlanSource *plansource);
static bool CheckCachedPlan(CachedPlanSource *plansource);
static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
- ParamListInfo boundParams);
+ ParamListInfo boundParams);
static bool choose_custom_plan(CachedPlanSource *plansource,
- ParamListInfo boundParams);
+ ParamListInfo boundParams);
static double cached_plan_cost(CachedPlan *plan);
static void AcquireExecutorLocks(List *stmt_list, bool acquire);
static void AcquirePlannerLocks(List *stmt_list, bool acquire);
@@ -111,7 +111,7 @@ InitPlanCache(void)
* CreateCachedPlan: initially create a plan cache entry.
*
* Creation of a cached plan is divided into two steps, CreateCachedPlan and
- * CompleteCachedPlan. CreateCachedPlan should be called after running the
+ * CompleteCachedPlan. CreateCachedPlan should be called after running the
* query through raw_parser, but before doing parse analysis and rewrite;
* CompleteCachedPlan is called after that. The reason for this arrangement
* is that it can save one round of copying of the raw parse tree, since
@@ -198,13 +198,13 @@ CreateCachedPlan(Node *raw_parse_tree,
* CompleteCachedPlan: second step of creating a plan cache entry.
*
* Pass in the analyzed-and-rewritten form of the query, as well as the
- * required subsidiary data about parameters and such. All passed values will
+ * required subsidiary data about parameters and such. All passed values will
* be copied into the CachedPlanSource's memory, except as specified below.
* After this is called, GetCachedPlan can be called to obtain a plan, and
* optionally the CachedPlanSource can be saved using SaveCachedPlan.
*
* If querytree_context is not NULL, the querytree_list must be stored in that
- * context (but the other parameters need not be). The querytree_list is not
+ * context (but the other parameters need not be). The querytree_list is not
* copied, rather the given context is kept as the initial query_context of
* the CachedPlanSource. (It should have been created as a child of the
* caller's working memory context, but it will now be reparented to belong
@@ -277,8 +277,8 @@ CompleteCachedPlan(CachedPlanSource *plansource,
/*
* Use the planner machinery to extract dependencies. Data is saved in
- * query_context. (We assume that not a lot of extra cruft is created
- * by this call.)
+ * query_context. (We assume that not a lot of extra cruft is created by
+ * this call.)
*/
extract_query_dependencies((Node *) querytree_list,
&plansource->relationOids,
@@ -327,7 +327,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
*
* This is guaranteed not to throw error; callers typically depend on that
* since this is called just before or just after adding a pointer to the
- * CachedPlanSource to some permanent data structure of their own. Up until
+ * CachedPlanSource to some permanent data structure of their own. Up until
* this is done, a CachedPlanSource is just transient data that will go away
* automatically on transaction abort.
*/
@@ -341,16 +341,16 @@ SaveCachedPlan(CachedPlanSource *plansource)
/*
* In typical use, this function would be called before generating any
- * plans from the CachedPlanSource. If there is a generic plan, moving
- * it into CacheMemoryContext would be pretty risky since it's unclear
+ * plans from the CachedPlanSource. If there is a generic plan, moving it
+ * into CacheMemoryContext would be pretty risky since it's unclear
* whether the caller has taken suitable care with making references
- * long-lived. Best thing to do seems to be to discard the plan.
+ * long-lived. Best thing to do seems to be to discard the plan.
*/
ReleaseGenericPlan(plansource);
/*
- * Reparent the source memory context under CacheMemoryContext so that
- * it will live indefinitely. The query_context follows along since it's
+ * Reparent the source memory context under CacheMemoryContext so that it
+ * will live indefinitely. The query_context follows along since it's
* already a child of the other one.
*/
MemoryContextSetParent(plansource->context, CacheMemoryContext);
@@ -474,8 +474,8 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
}
/*
- * Discard the no-longer-useful query tree. (Note: we don't want to
- * do this any earlier, else we'd not have been able to release locks
+ * Discard the no-longer-useful query tree. (Note: we don't want to do
+ * this any earlier, else we'd not have been able to release locks
* correctly in the race condition case.)
*/
plansource->is_valid = false;
@@ -484,14 +484,14 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
plansource->invalItems = NIL;
/*
- * Free the query_context. We don't really expect MemoryContextDelete to
+ * Free the query_context. We don't really expect MemoryContextDelete to
* fail, but just in case, make sure the CachedPlanSource is left in a
- * reasonably sane state. (The generic plan won't get unlinked yet,
- * but that's acceptable.)
+ * reasonably sane state. (The generic plan won't get unlinked yet, but
+ * that's acceptable.)
*/
if (plansource->query_context)
{
- MemoryContext qcxt = plansource->query_context;
+ MemoryContext qcxt = plansource->query_context;
plansource->query_context = NULL;
MemoryContextDelete(qcxt);
@@ -553,7 +553,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
PopOverrideSearchPath();
/*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*
* We assume the parameter types didn't change from the first time, so no
@@ -596,8 +596,8 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
/*
* Use the planner machinery to extract dependencies. Data is saved in
- * query_context. (We assume that not a lot of extra cruft is created
- * by this call.)
+ * query_context. (We assume that not a lot of extra cruft is created by
+ * this call.)
*/
extract_query_dependencies((Node *) qlist,
&plansource->relationOids,
@@ -612,12 +612,12 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
plansource->query_list = qlist;
/*
- * Note: we do not reset generic_cost or total_custom_cost, although
- * we could choose to do so. If the DDL or statistics change that
- * prompted the invalidation meant a significant change in the cost
- * estimates, it would be better to reset those variables and start
- * fresh; but often it doesn't, and we're better retaining our hard-won
- * knowledge about the relative costs.
+ * Note: we do not reset generic_cost or total_custom_cost, although we
+ * could choose to do so. If the DDL or statistics change that prompted
+ * the invalidation meant a significant change in the cost estimates, it
+ * would be better to reset those variables and start fresh; but often it
+ * doesn't, and we're better retaining our hard-won knowledge about the
+ * relative costs.
*/
plansource->is_valid = true;
@@ -728,7 +728,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
* we ought to be holding sufficient locks to prevent any invalidation.
* However, if we're building a custom plan after having built and
* rejected a generic plan, it's possible to reach here with is_valid
- * false due to an invalidation while making the generic plan. In theory
+ * false due to an invalidation while making the generic plan. In theory
* the invalidation must be a false positive, perhaps a consequence of an
* sinval reset event or the CLOBBER_CACHE_ALWAYS debug code. But for
* safety, let's treat it as real and redo the RevalidateCachedQuery call.
@@ -770,10 +770,10 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
}
/*
- * The planner may try to call SPI-using functions, which causes a
- * problem if we're already inside one. Rather than expect all
- * SPI-using code to do SPI_push whenever a replan could happen,
- * it seems best to take care of the case here.
+ * The planner may try to call SPI-using functions, which causes a problem
+ * if we're already inside one. Rather than expect all SPI-using code to
+ * do SPI_push whenever a replan could happen, it seems best to take care
+ * of the case here.
*/
spi_pushed = SPI_push_conditional();
@@ -865,8 +865,8 @@ choose_custom_plan(CachedPlanSource *plansource, ParamListInfo boundParams)
/*
* Prefer generic plan if it's less than 10% more expensive than average
* custom plan. This threshold is a bit arbitrary; it'd be better if we
- * had some means of comparing planning time to the estimated runtime
- * cost differential.
+ * had some means of comparing planning time to the estimated runtime cost
+ * differential.
*
* Note that if generic_cost is -1 (indicating we've not yet determined
* the generic plan cost), we'll always prefer generic at this point.
@@ -966,7 +966,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
{
/* otherwise, it should be a sibling of the plansource */
MemoryContextSetParent(plan->context,
- MemoryContextGetParent(plansource->context));
+ MemoryContextGetParent(plansource->context));
}
/* Update generic_cost whenever we make a new generic plan */
plansource->generic_cost = cached_plan_cost(plan);
@@ -984,7 +984,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
/*
* If we choose to plan again, we need to re-copy the query_list,
- * since the planner probably scribbled on it. We can force
+ * since the planner probably scribbled on it. We can force
* BuildCachedPlan to do that by passing NIL.
*/
qlist = NIL;
@@ -1089,7 +1089,7 @@ CachedPlanSetParentContext(CachedPlanSource *plansource,
*
* This is a convenience routine that does the equivalent of
* CreateCachedPlan + CompleteCachedPlan, using the data stored in the
- * input CachedPlanSource. The result is therefore "unsaved" (regardless
+ * input CachedPlanSource. The result is therefore "unsaved" (regardless
* of the state of the source), and we don't copy any generic plan either.
* The result will be currently valid, or not, the same as the source.
*/
@@ -1233,7 +1233,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
{
/*
* Ignore utility statements, except those (such as EXPLAIN) that
- * contain a parsed-but-not-planned query. Note: it's okay to use
+ * contain a parsed-but-not-planned query. Note: it's okay to use
* ScanQueryForLocks, even though the query hasn't been through
* rule rewriting, because rewriting doesn't change the query
* representation.
@@ -1429,7 +1429,7 @@ plan_list_is_transient(List *stmt_list)
/*
* PlanCacheComputeResultDesc: given a list of analyzed-and-rewritten Queries,
- * determine the result tupledesc it will produce. Returns NULL if the
+ * determine the result tupledesc it will produce. Returns NULL if the
* execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
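
The choose_custom_plan() comment above boils the generic-vs-custom decision down to a 10% threshold against the average custom-plan cost. A minimal sketch of just that rule, written as a free-standing helper (prefer_custom_plan and its parameters are illustrative names, not the actual function, which also weighs cursor options and requires a minimum number of custom plans first):

    /*
     * Sketch of the "within 10% of the average custom plan" rule from the
     * hunk above.  Returns true if a custom plan should be built.
     */
    static bool
    prefer_custom_plan(double generic_cost, double total_custom_cost,
                       int num_custom_plans)
    {
        double      avg_custom_cost;

        if (num_custom_plans == 0)
            return true;        /* no custom-plan baseline yet */

        avg_custom_cost = total_custom_cost / num_custom_plans;

        /*
         * A generic_cost of -1 ("not yet determined") compares below any
         * average, so the generic plan wins at this point -- matching the
         * note in the original comment.
         */
        if (generic_cost < avg_custom_cost * 1.1)
            return false;       /* generic plan is close enough; keep it */

        return true;
    }
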
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 7f0e20ec174..4cbf812ed59 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2540,7 +2540,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* Insert relation physical and logical identifiers (OIDs) into the right
- * places. For a mapped relation, we set relfilenode to zero and rely on
+ * places. For a mapped relation, we set relfilenode to zero and rely on
* RelationInitPhysicalAddr to consult the map.
*/
rel->rd_rel->relisshared = shared_relation;
@@ -3365,9 +3365,9 @@ RelationGetIndexList(Relation relation)
result = insert_ordered_oid(result, index->indexrelid);
/*
- * indclass cannot be referenced directly through the C struct, because
- * it comes after the variable-width indkey field. Must extract the
- * datum the hard way...
+ * indclass cannot be referenced directly through the C struct,
+ * because it comes after the variable-width indkey field. Must
+ * extract the datum the hard way...
*/
indclassDatum = heap_getattr(htup,
Anum_pg_index_indclass,
@@ -4514,8 +4514,8 @@ RelationCacheInitFilePreInvalidate(void)
/*
* The file might not be there if no backend has been started since
* the last removal. But complain about failures other than ENOENT.
- * Fortunately, it's not too late to abort the transaction if we
- * can't get rid of the would-be-obsolete init file.
+ * Fortunately, it's not too late to abort the transaction if we can't
+ * get rid of the would-be-obsolete init file.
*/
if (errno != ENOENT)
ereport(ERROR,
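
For the indclass hunk above, extracting the datum "the hard way" just means going through heap_getattr() with the attribute number and casting the result. A short sketch, assuming a pg_index tuple descriptor is already at hand (pg_index_desc below is a stand-in for however the caller obtains it):

    Datum       indclassDatum;
    bool        isnull;
    oidvector  *indclass;

    /* fetch the variable-position indclass column by attribute number */
    indclassDatum = heap_getattr(htup, Anum_pg_index_indclass,
                                 pg_index_desc, &isnull);
    Assert(!isnull);
    indclass = (oidvector *) DatumGetPointer(indclassDatum);
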
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 4ad5e7fc0b0..b408de0730e 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -602,10 +602,10 @@ check_TSCurrentConfig(char **newval, void **extra, GucSource source)
cfgId = get_ts_config_oid(stringToQualifiedNameList(*newval), true);
/*
- * When source == PGC_S_TEST, we are checking the argument of an
- * ALTER DATABASE SET or ALTER USER SET command. It could be that
- * the intended use of the setting is for some other database, so
- * we should not error out if the text search configuration is not
+ * When source == PGC_S_TEST, we are checking the argument of an ALTER
+ * DATABASE SET or ALTER USER SET command. It could be that the
+ * intended use of the setting is for some other database, so we
+ * should not error out if the text search configuration is not
* present in the current database. We issue a NOTICE instead.
*/
if (!OidIsValid(cfgId))
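
The ts_cache.c hunk spells out the PGC_S_TEST convention for GUC check hooks: the value may be intended for another database, so accept it with a NOTICE instead of failing. A sketch of that pattern, with illustrative message wording:

    if (!OidIsValid(cfgId))
    {
        if (source == PGC_S_TEST)
        {
            /* setting may be meant for another database: warn, but accept */
            ereport(NOTICE,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("text search configuration \"%s\" does not exist",
                            *newval)));
            return true;
        }
        else
            return false;       /* normal case: reject the bad value */
    }
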
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 65c28a75080..a40b343ebcf 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -114,7 +114,7 @@ int Log_destination = LOG_DESTINATION_STDERR;
/*
* Max string length to send to syslog(). Note that this doesn't count the
* sequence-number prefix we add, and of course it doesn't count the prefix
- * added by syslog itself. Solaris and sysklogd truncate the final message
+ * added by syslog itself. Solaris and sysklogd truncate the final message
* at 1024 bytes, so this value leaves 124 bytes for those prefixes. (Most
* other syslog implementations seem to have limits of 2KB or so.)
*/
@@ -1857,8 +1857,8 @@ setup_formatted_log_time(void)
stamp_time = (pg_time_t) tv.tv_sec;
/*
- * Note: we expect that guc.c will ensure that log_timezone is set up
- * (at least with a minimal GMT value) before Log_line_prefix can become
+ * Note: we expect that guc.c will ensure that log_timezone is set up (at
+ * least with a minimal GMT value) before Log_line_prefix can become
* nonempty or CSV mode can be selected.
*/
pg_strftime(formatted_log_time, FORMATTED_TS_LEN,
@@ -1880,8 +1880,8 @@ setup_formatted_start_time(void)
pg_time_t stamp_time = (pg_time_t) MyStartTime;
/*
- * Note: we expect that guc.c will ensure that log_timezone is set up
- * (at least with a minimal GMT value) before Log_line_prefix can become
+ * Note: we expect that guc.c will ensure that log_timezone is set up (at
+ * least with a minimal GMT value) before Log_line_prefix can become
* nonempty or CSV mode can be selected.
*/
pg_strftime(formatted_start_time, FORMATTED_TS_LEN,
@@ -2506,7 +2506,7 @@ send_message_to_server_log(ErrorData *edata)
*
* Note: when there are multiple backends writing into the syslogger pipe,
* it's critical that each write go into the pipe indivisibly, and not
- * get interleaved with data from other processes. Fortunately, the POSIX
+ * get interleaved with data from other processes. Fortunately, the POSIX
* spec requires that writes to pipes be atomic so long as they are not
* more than PIPE_BUF bytes long. So we divide long messages into chunks
* that are no more than that length, and send one chunk per write() call.
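
The first elog.c hunk explains why messages bound for syslog() are limited to PG_SYSLOG_LIMIT bytes and tagged with a sequence number. A heavily simplified sketch of such chunking; it ignores the newline and multibyte-boundary handling the real routine does, and write_syslog_chunked is an invented name (requires <syslog.h> and <string.h>):

    static void
    write_syslog_chunked(int level, const char *line)
    {
        static unsigned long seq = 0;   /* sequence number shared by all chunks */
        int         len = strlen(line);
        int         chunk_nr = 1;

        seq++;
        while (len > PG_SYSLOG_LIMIT)
        {
            char        buf[PG_SYSLOG_LIMIT + 1];

            memcpy(buf, line, PG_SYSLOG_LIMIT);
            buf[PG_SYSLOG_LIMIT] = '\0';
            syslog(level, "[%lu-%d] %s", seq, chunk_nr++, buf);
            line += PG_SYSLOG_LIMIT;
            len -= PG_SYSLOG_LIMIT;
        }
        syslog(level, "[%lu-%d] %s", seq, chunk_nr, line);
    }
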
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 788f1801a80..2ec63fae568 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -408,8 +408,8 @@ fmgr_info_other_lang(Oid functionId, FmgrInfo *finfo, HeapTuple procedureTuple)
/*
* Look up the language's call handler function, ignoring any attributes
- * that would normally cause insertion of fmgr_security_definer. We
- * need to get back a bare pointer to the actual C-language function.
+ * that would normally cause insertion of fmgr_security_definer. We need
+ * to get back a bare pointer to the actual C-language function.
*/
fmgr_info_cxt_security(languageStruct->lanplcallfoid, &plfinfo,
CurrentMemoryContext, true);
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index dd914789c07..addf95bca9b 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -490,9 +490,9 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
ANYARRAYOID);
if (OidIsValid(anyrange_type))
{
- Oid subtype = resolve_generic_type(ANYELEMENTOID,
- anyrange_type,
- ANYRANGEOID);
+ Oid subtype = resolve_generic_type(ANYELEMENTOID,
+ anyrange_type,
+ ANYRANGEOID);
/* check for inconsistent array and range results */
if (OidIsValid(anyelement_type) && anyelement_type != subtype)
@@ -524,8 +524,8 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
/*
* Identify the collation to use for polymorphic OUT parameters. (It'll
* necessarily be the same for both anyelement and anyarray.) Note that
- * range types are not collatable, so any possible internal collation of
- * a range type is not considered here.
+ * range types are not collatable, so any possible internal collation of a
+ * range type is not considered here.
*/
if (OidIsValid(anyelement_type))
anycollation = get_typcollation(anyelement_type);
@@ -687,9 +687,9 @@ resolve_polymorphic_argtypes(int numargs, Oid *argtypes, char *argmodes,
ANYARRAYOID);
if (OidIsValid(anyrange_type))
{
- Oid subtype = resolve_generic_type(ANYELEMENTOID,
- anyrange_type,
- ANYRANGEOID);
+ Oid subtype = resolve_generic_type(ANYELEMENTOID,
+ anyrange_type,
+ ANYRANGEOID);
/* check for inconsistent array and range results */
if (OidIsValid(anyelement_type) && anyelement_type != subtype)
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 0f734260c16..fb376a0d271 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -631,7 +631,7 @@ GetUserNameFromId(Oid roleid)
* ($DATADIR/postmaster.pid) and a Unix-socket-file lockfile ($SOCKFILE.lock).
* Both kinds of files contain the same info initially, although we can add
* more information to a data-directory lockfile after it's created, using
- * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
+ * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
* of these lockfiles.
*
* On successful lockfile creation, a proc_exit callback to remove the
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 7de460e0dd3..03d68119d1c 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1341,7 +1341,7 @@ pg_utf8_islegal(const unsigned char *source, int length)
*
* Not knowing anything about the properties of the encoding in use, we just
* keep incrementing the last byte until we get a validly-encoded result,
- * or we run out of values to try. We don't bother to try incrementing
+ * or we run out of values to try. We don't bother to try incrementing
* higher-order bytes, so there's no growth in runtime for wider characters.
* (If we did try to do that, we'd need to consider the likelihood that 255
* is not a valid final byte in the encoding.)
@@ -1371,7 +1371,7 @@ pg_generic_charinc(unsigned char *charptr, int len)
* For a one-byte character less than 0x7F, we just increment the byte.
*
* For a multibyte character, every byte but the first must fall between 0x80
- * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
+ * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
* the last byte that's not already at its maximum value. If we can't find a
* byte that's less than the maximum allowable value, we simply fail. We also
* need some special-case logic to skip regions used for surrogate pair
@@ -1530,7 +1530,7 @@ pg_eucjp_increment(unsigned char *charptr, int length)
return false;
}
else
- { /* ASCII, single byte */
+ { /* ASCII, single byte */
if (c1 > 0x7e)
return false;
(*charptr)++;
@@ -1540,8 +1540,7 @@ pg_eucjp_increment(unsigned char *charptr, int length)
return true;
}
-
-#endif /* !FRONTEND */
+#endif /* !FRONTEND */
/*
@@ -1675,8 +1674,8 @@ mbcharacter_incrementer
pg_database_encoding_character_incrementer(void)
{
/*
- * Eventually it might be best to add a field to pg_wchar_table[],
- * but for now we just use a switch.
+ * Eventually it might be best to add a field to pg_wchar_table[], but for
+ * now we just use a switch.
*/
switch (GetDatabaseEncoding())
{
@@ -1878,10 +1877,10 @@ report_untranslatable_char(int src_encoding, int dest_encoding,
ereport(ERROR,
(errcode(ERRCODE_UNTRANSLATABLE_CHARACTER),
- errmsg("character with byte sequence %s in encoding \"%s\" has no equivalent in encoding \"%s\"",
- buf,
- pg_enc2name_tbl[src_encoding].name,
- pg_enc2name_tbl[dest_encoding].name)));
+ errmsg("character with byte sequence %s in encoding \"%s\" has no equivalent in encoding \"%s\"",
+ buf,
+ pg_enc2name_tbl[src_encoding].name,
+ pg_enc2name_tbl[dest_encoding].name)));
}
-#endif /* !FRONTEND */
+#endif /* !FRONTEND */
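
The wchar.c comments describe pg_generic_charinc's strategy: keep bumping the last byte until the encoding verifier accepts the result or values run out. A sketch close to that loop, fetching the verifier from pg_wchar_table[] for the current database encoding (generic_charinc is an illustrative name):

    static bool
    generic_charinc(unsigned char *charptr, int len)
    {
        unsigned char *lastbyte = charptr + (len - 1);
        mbverifier  mbverify;

        /* use the verifier for the current database encoding */
        mbverify = pg_wchar_table[GetDatabaseEncoding()].mbverify;

        while (*lastbyte < (unsigned char) 255)
        {
            (*lastbyte)++;
            if ((*mbverify) (charptr, len) == len)
                return true;    /* found a validly-encoded successor */
        }
        return false;           /* ran out of values to try */
    }

Because only the last byte moves, the cost does not grow with character width, at the price of occasionally failing when no valid successor exists in that byte.
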
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index d75ab430296..b756e58a369 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -3333,7 +3333,7 @@ static void InitializeOneGUCOption(struct config_generic * gconf);
static void push_old_value(struct config_generic * gconf, GucAction action);
static void ReportGUCOption(struct config_generic * record);
static void reapply_stacked_values(struct config_generic * variable,
- struct config_string *pHolder,
+ struct config_string * pHolder,
GucStack *stack,
const char *curvalue,
GucContext curscontext, GucSource cursource);
@@ -4143,8 +4143,8 @@ SelectConfigFiles(const char *userDoption, const char *progname)
/*
* If timezone_abbreviations wasn't set in the configuration file, install
- * the default value. We do it this way because we can't safely install
- * a "real" value until my_exec_path is set, which may not have happened
+ * the default value. We do it this way because we can't safely install a
+ * "real" value until my_exec_path is set, which may not have happened
* when InitializeGUCOptions runs, so the bootstrap default value cannot
* be the real desired default.
*/
@@ -4415,7 +4415,7 @@ NewGUCNestLevel(void)
/*
* Do GUC processing at transaction or subtransaction commit or abort, or
* when exiting a function that has proconfig settings, or when undoing a
- * transient assignment to some GUC variables. (The name is thus a bit of
+ * transient assignment to some GUC variables. (The name is thus a bit of
* a misnomer; perhaps it should be ExitGUCNestLevel or some such.)
* During abort, we discard all GUC settings that were applied at nesting
* levels >= nestLevel. nestLevel == 1 corresponds to the main transaction.
@@ -5106,7 +5106,7 @@ config_enum_get_options(struct config_enum * record, const char *prefix,
*
* Return value:
* +1: the value is valid and was successfully applied.
- * 0: the name or value is invalid (but see below).
+ * 0: the name or value is invalid (but see below).
* -1: the value was not applied because of context, priority, or changeVal.
*
* If there is an error (non-existing option, invalid value) then an
@@ -6441,7 +6441,7 @@ define_custom_variable(struct config_generic * variable)
* variable. Essentially, we need to duplicate all the active and stacked
* values, but with appropriate validation and datatype adjustment.
*
- * If an assignment fails, we report a WARNING and keep going. We don't
+ * If an assignment fails, we report a WARNING and keep going. We don't
* want to throw ERROR for bad values, because it'd bollix the add-on
* module that's presumably halfway through getting loaded. In such cases
* the default or previous state will become active instead.
@@ -6469,7 +6469,7 @@ define_custom_variable(struct config_generic * variable)
/*
* Free up as much as we conveniently can of the placeholder structure.
* (This neglects any stack items, so it's possible for some memory to be
- * leaked. Since this can only happen once per session per variable, it
+ * leaked. Since this can only happen once per session per variable, it
* doesn't seem worth spending much code on.)
*/
set_string_field(pHolder, pHolder->variable, NULL);
@@ -6487,7 +6487,7 @@ define_custom_variable(struct config_generic * variable)
*/
static void
reapply_stacked_values(struct config_generic * variable,
- struct config_string *pHolder,
+ struct config_string * pHolder,
GucStack *stack,
const char *curvalue,
GucContext curscontext, GucSource cursource)
@@ -6526,7 +6526,7 @@ reapply_stacked_values(struct config_generic * variable,
case GUC_SET_LOCAL:
/* first, apply the masked value as SET */
(void) set_config_option(name, stack->masked.val.stringval,
- stack->masked_scontext, PGC_S_SESSION,
+ stack->masked_scontext, PGC_S_SESSION,
GUC_ACTION_SET, true, WARNING);
/* then apply the current value as LOCAL */
(void) set_config_option(name, curvalue,
@@ -6542,7 +6542,7 @@ reapply_stacked_values(struct config_generic * variable,
else
{
/*
- * We are at the end of the stack. If the active/previous value is
+ * We are at the end of the stack. If the active/previous value is
* different from the reset value, it must represent a previously
* committed session value. Apply it, and then drop the stack entry
* that set_config_option will have created under the impression that
@@ -8028,8 +8028,8 @@ validate_option_array_item(const char *name, const char *value,
*
* name is not known, but exists or can be created as a placeholder (i.e.,
* it has a prefixed name). We allow this case if you're a superuser,
- * otherwise not. Superusers are assumed to know what they're doing.
- * We can't allow it for other users, because when the placeholder is
+ * otherwise not. Superusers are assumed to know what they're doing. We
+ * can't allow it for other users, because when the placeholder is
* resolved it might turn out to be a SUSET variable;
* define_custom_variable assumes we checked that.
*
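
The guc.c hunk documents set_config_option()'s three-way result. A short usage sketch; the option name, value, PGC_USERSET context, and PGC_S_SESSION source are illustrative choices, but the argument order matches the reapply_stacked_values() calls above:

    int         rc;

    rc = set_config_option("work_mem", "64MB",
                           PGC_USERSET, PGC_S_SESSION,
                           GUC_ACTION_SET, true, WARNING);
    if (rc > 0)
        elog(DEBUG1, "value was valid and applied");
    else if (rc == 0)
        elog(DEBUG1, "name or value was invalid (reported at WARNING)");
    else
        elog(DEBUG1, "not applied because of context, priority, or changeVal");
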
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index cfb73c1b090..5713bbe12ce 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -487,7 +487,7 @@ PortalDrop(Portal portal, bool isTopCommit)
* during transaction abort.
*
* Note: in most paths of control, this will have been done already in
- * MarkPortalDone or MarkPortalFailed. We're just making sure.
+ * MarkPortalDone or MarkPortalFailed. We're just making sure.
*/
if (PointerIsValid(portal->cleanup))
{
diff --git a/src/backend/utils/sort/sortsupport.c b/src/backend/utils/sort/sortsupport.c
index 7f388fd9bfc..b6d916d3e43 100644
--- a/src/backend/utils/sort/sortsupport.c
+++ b/src/backend/utils/sort/sortsupport.c
@@ -24,7 +24,7 @@
typedef struct
{
FunctionCallInfoData fcinfo; /* reusable callinfo structure */
- FmgrInfo flinfo; /* lookup data for comparison function */
+ FmgrInfo flinfo; /* lookup data for comparison function */
} SortShimExtra;
@@ -70,7 +70,6 @@ ApplySortComparator(Datum datum1, bool isNull1,
return compare;
}
-
#endif /* ! USE_INLINE */
/*
@@ -108,7 +107,7 @@ comparison_shim(Datum x, Datum y, SortSupport ssup)
void
PrepareSortSupportComparisonShim(Oid cmpFunc, SortSupport ssup)
{
- SortShimExtra *extra;
+ SortShimExtra *extra;
extra = (SortShimExtra *) MemoryContextAlloc(ssup->ssup_cxt,
sizeof(SortShimExtra));
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 89698181dbf..d5a2003e5b8 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -195,8 +195,8 @@ typedef enum
#define TAPE_BUFFER_OVERHEAD (BLCKSZ * 3)
#define MERGE_BUFFER_SIZE (BLCKSZ * 32)
-typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
+ Tuplesortstate *state);
/*
* Private state of a Tuplesort operation.
@@ -226,7 +226,7 @@ struct Tuplesortstate
* <0, 0, >0 according as a<b, a=b, a>b. The API must match
* qsort_arg_comparator.
*/
- SortTupleComparator comparetup;
+ SortTupleComparator comparetup;
/*
* Function to copy a supplied input tuple into palloc'd space and set up
@@ -342,13 +342,13 @@ struct Tuplesortstate
* tuplesort_begin_heap and used only by the MinimalTuple routines.
*/
TupleDesc tupDesc;
- SortSupport sortKeys; /* array of length nKeys */
+ SortSupport sortKeys; /* array of length nKeys */
/*
* This variable is shared by the single-key MinimalTuple case and the
* Datum case (which both use qsort_ssup()). Otherwise it's NULL.
*/
- SortSupport onlyKey;
+ SortSupport onlyKey;
/*
* These variables are specific to the CLUSTER case; they are set by
@@ -634,7 +634,7 @@ tuplesort_begin_heap(TupleDesc tupDesc,
for (i = 0; i < nkeys; i++)
{
- SortSupport sortKey = state->sortKeys + i;
+ SortSupport sortKey = state->sortKeys + i;
AssertArg(attNums[i] != 0);
AssertArg(sortOperators[i] != 0);
@@ -2685,7 +2685,7 @@ inlineApplySortFunction(FmgrInfo *sortFunction, int sk_flags, Oid collation,
static int
comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
- SortSupport sortKey = state->sortKeys;
+ SortSupport sortKey = state->sortKeys;
HeapTupleData ltup;
HeapTupleData rtup;
TupleDesc tupDesc;
@@ -2806,7 +2806,7 @@ readtup_heap(Tuplesortstate *state, SortTuple *stup,
static void
reversedirection_heap(Tuplesortstate *state)
{
- SortSupport sortKey = state->sortKeys;
+ SortSupport sortKey = state->sortKeys;
int nkey;
for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
@@ -3076,9 +3076,10 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
bool isnull[INDEX_MAX_KEYS];
/*
- * Some rather brain-dead implementations of qsort (such as the one in QNX 4)
- * will sometimes call the comparison routine to compare a value to itself,
- * but we always use our own implementation, which does not.
+ * Some rather brain-dead implementations of qsort (such as the one in
+ * QNX 4) will sometimes call the comparison routine to compare a
+ * value to itself, but we always use our own implementation, which
+ * does not.
*/
Assert(tuple1 != tuple2);
@@ -3094,8 +3095,8 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
/*
* If key values are equal, we sort on ItemPointer. This does not affect
- * validity of the finished index, but it may be useful to have index scans
- * in physical order.
+ * validity of the finished index, but it may be useful to have index
+ * scans in physical order.
*/
{
BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
@@ -3140,8 +3141,8 @@ comparetup_index_hash(const SortTuple *a, const SortTuple *b,
/*
* If hash values are equal, we sort on ItemPointer. This does not affect
- * validity of the finished index, but it may be useful to have index scans
- * in physical order.
+ * validity of the finished index, but it may be useful to have index
+ * scans in physical order.
*/
tuple1 = (IndexTuple) a->tuple;
tuple2 = (IndexTuple) b->tuple;
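
Both comparetup_index_* hunks end with the same tiebreak: when the keys (or hash values) are equal, order by heap TID so the finished index tends to come out in physical order. A sketch of that tiebreak, continuing from the tuple1/tuple2 IndexTuple pointers shown above:

    {
        BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
        BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid);

        if (blk1 != blk2)
            return (blk1 < blk2) ? -1 : 1;
    }
    {
        OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid);
        OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid);

        if (pos1 != pos2)
            return (pos1 < pos2) ? -1 : 1;
    }
    return 0;                   /* entirely equal; order is arbitrary */
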
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 2d30f09ac14..8a7931b8566 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -569,7 +569,7 @@ tuplestore_puttuple(Tuplestorestate *state, HeapTuple tuple)
MemoryContext oldcxt = MemoryContextSwitchTo(state->context);
/*
- * Copy the tuple. (Must do this even in WRITEFILE case. Note that
+ * Copy the tuple. (Must do this even in WRITEFILE case. Note that
* COPYTUP includes USEMEM, so we needn't do that here.)
*/
tuple = COPYTUP(state, tuple);
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 574099dc9a3..7187ca7c98d 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -11,7 +11,7 @@
* regd_count and count it in RegisteredSnapshots, but this reference is not
* tracked by a resource owner. We used to use the TopTransactionResourceOwner
* to track this snapshot reference, but that introduces logical circularity
- * and thus makes it impossible to clean up in a sane fashion. It's better to
+ * and thus makes it impossible to clean up in a sane fashion. It's better to
* handle this reference as an internally-tracked registration, so that this
* module is entirely lower-level than ResourceOwners.
*
@@ -113,7 +113,7 @@ static int RegisteredSnapshots = 0;
bool FirstSnapshotSet = false;
/*
- * Remember the serializable transaction snapshot, if any. We cannot trust
+ * Remember the serializable transaction snapshot, if any. We cannot trust
* FirstSnapshotSet in combination with IsolationUsesXactSnapshot(), because
* GUC may be reset before us, changing the value of IsolationUsesXactSnapshot.
*/
@@ -269,23 +269,23 @@ SetTransactionSnapshot(Snapshot sourcesnap, TransactionId sourcexid)
* Now we have to fix what GetSnapshotData did with MyPgXact->xmin and
* TransactionXmin. There is a race condition: to make sure we are not
* causing the global xmin to go backwards, we have to test that the
- * source transaction is still running, and that has to be done atomically.
- * So let procarray.c do it.
+ * source transaction is still running, and that has to be done
+ * atomically. So let procarray.c do it.
*
- * Note: in serializable mode, predicate.c will do this a second time.
- * It doesn't seem worth contorting the logic here to avoid two calls,
+ * Note: in serializable mode, predicate.c will do this a second time. It
+ * doesn't seem worth contorting the logic here to avoid two calls,
* especially since it's not clear that predicate.c *must* do this.
*/
if (!ProcArrayInstallImportedXmin(CurrentSnapshot->xmin, sourcexid))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not import the requested snapshot"),
- errdetail("The source transaction %u is not running anymore.",
- sourcexid)));
+ errdetail("The source transaction %u is not running anymore.",
+ sourcexid)));
/*
* In transaction-snapshot mode, the first snapshot must live until end of
- * xact, so we must make a copy of it. Furthermore, if we're running in
+ * xact, so we must make a copy of it. Furthermore, if we're running in
* serializable mode, predicate.c needs to do its own processing.
*/
if (IsolationUsesXactSnapshot())
@@ -647,8 +647,8 @@ AtEOXact_Snapshot(bool isCommit)
* RegisteredSnapshots to keep the check below happy. But we don't bother
* to do FreeSnapshot, for two reasons: the memory will go away with
* TopTransactionContext anyway, and if someone has left the snapshot
- * stacked as active, we don't want the code below to be chasing through
- * a dangling pointer.
+ * stacked as active, we don't want the code below to be chasing through a
+ * dangling pointer.
*/
if (FirstXactSnapshot != NULL)
{
@@ -668,9 +668,9 @@ AtEOXact_Snapshot(bool isCommit)
char buf[MAXPGPATH];
/*
- * Get rid of the files. Unlink failure is only a WARNING because
- * (1) it's too late to abort the transaction, and (2) leaving a
- * leaked file around has little real consequence anyway.
+ * Get rid of the files. Unlink failure is only a WARNING because (1)
+ * it's too late to abort the transaction, and (2) leaving a leaked
+ * file around has little real consequence anyway.
*/
for (i = 1; i <= list_length(exportedSnapshots); i++)
{
@@ -745,17 +745,17 @@ ExportSnapshot(Snapshot snapshot)
char pathtmp[MAXPGPATH];
/*
- * It's tempting to call RequireTransactionChain here, since it's not
- * very useful to export a snapshot that will disappear immediately
- * afterwards. However, we haven't got enough information to do that,
- * since we don't know if we're at top level or not. For example, we
- * could be inside a plpgsql function that is going to fire off other
- * transactions via dblink. Rather than disallow perfectly legitimate
- * usages, don't make a check.
+ * It's tempting to call RequireTransactionChain here, since it's not very
+ * useful to export a snapshot that will disappear immediately afterwards.
+ * However, we haven't got enough information to do that, since we don't
+ * know if we're at top level or not. For example, we could be inside a
+ * plpgsql function that is going to fire off other transactions via
+ * dblink. Rather than disallow perfectly legitimate usages, don't make a
+ * check.
*
* Also note that we don't make any restriction on the transaction's
- * isolation level; however, importers must check the level if they
- * are serializable.
+ * isolation level; however, importers must check the level if they are
+ * serializable.
*/
/*
@@ -798,8 +798,8 @@ ExportSnapshot(Snapshot snapshot)
/*
* Fill buf with a text serialization of the snapshot, plus identification
- * data about this transaction. The format expected by ImportSnapshot
- * is pretty rigid: each line must be fieldname:value.
+ * data about this transaction. The format expected by ImportSnapshot is
+ * pretty rigid: each line must be fieldname:value.
*/
initStringInfo(&buf);
@@ -830,8 +830,8 @@ ExportSnapshot(Snapshot snapshot)
appendStringInfo(&buf, "xip:%u\n", topXid);
/*
- * Similarly, we add our subcommitted child XIDs to the subxid data.
- * Here, we have to cope with possible overflow.
+ * Similarly, we add our subcommitted child XIDs to the subxid data. Here,
+ * we have to cope with possible overflow.
*/
if (snapshot->suboverflowed ||
snapshot->subxcnt + nchildren > GetMaxSnapshotSubxidCount())
@@ -963,16 +963,16 @@ parseXidFromText(const char *prefix, char **s, const char *filename)
/*
* ImportSnapshot
- * Import a previously exported snapshot. The argument should be a
- * filename in SNAPSHOT_EXPORT_DIR. Load the snapshot from that file.
- * This is called by "SET TRANSACTION SNAPSHOT 'foo'".
+ * Import a previously exported snapshot. The argument should be a
+ * filename in SNAPSHOT_EXPORT_DIR. Load the snapshot from that file.
+ * This is called by "SET TRANSACTION SNAPSHOT 'foo'".
*/
void
ImportSnapshot(const char *idstr)
{
char path[MAXPGPATH];
FILE *f;
- struct stat stat_buf;
+ struct stat stat_buf;
char *filebuf;
int xcnt;
int i;
@@ -985,19 +985,19 @@ ImportSnapshot(const char *idstr)
/*
* Must be at top level of a fresh transaction. Note in particular that
* we check we haven't acquired an XID --- if we have, it's conceivable
- * that the snapshot would show it as not running, making for very
- * screwy behavior.
+ * that the snapshot would show it as not running, making for very screwy
+ * behavior.
*/
if (FirstSnapshotSet ||
GetTopTransactionIdIfAny() != InvalidTransactionId ||
IsSubTransaction())
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("SET TRANSACTION SNAPSHOT must be called before any query")));
+ errmsg("SET TRANSACTION SNAPSHOT must be called before any query")));
/*
- * If we are in read committed mode then the next query would execute
- * with a new snapshot thus making this function call quite useless.
+ * If we are in read committed mode then the next query would execute with
+ * a new snapshot thus making this function call quite useless.
*/
if (!IsolationUsesXactSnapshot())
ereport(ERROR,
@@ -1100,8 +1100,8 @@ ImportSnapshot(const char *idstr)
/*
* If we're serializable, the source transaction must be too, otherwise
- * predicate.c has problems (SxactGlobalXmin could go backwards). Also,
- * a non-read-only transaction can't adopt a snapshot from a read-only
+ * predicate.c has problems (SxactGlobalXmin could go backwards). Also, a
+ * non-read-only transaction can't adopt a snapshot from a read-only
* transaction, as predicate.c handles the cases very differently.
*/
if (IsolationIsSerializable())
@@ -1120,15 +1120,15 @@ ImportSnapshot(const char *idstr)
* We cannot import a snapshot that was taken in a different database,
* because vacuum calculates OldestXmin on a per-database basis; so the
* source transaction's xmin doesn't protect us from data loss. This
- * restriction could be removed if the source transaction were to mark
- * its xmin as being globally applicable. But that would require some
+ * restriction could be removed if the source transaction were to mark its
+ * xmin as being globally applicable. But that would require some
* additional syntax, since that has to be known when the snapshot is
* initially taken. (See pgsql-hackers discussion of 2011-10-21.)
*/
if (src_dbid != MyDatabaseId)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot import a snapshot from a different database")));
+ errmsg("cannot import a snapshot from a different database")));
/* OK, install the snapshot */
SetTransactionSnapshot(&snapshot, src_xid);
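
ExportSnapshot's comment above fixes the serialization format: plain text built with appendStringInfo(), one fieldname:value pair per line. A fragment in that style; only the xip line and the format itself come from the hunk, and the xmin/xmax field names are shown for illustration:

    StringInfoData buf;

    initStringInfo(&buf);
    appendStringInfo(&buf, "xmin:%u\n", snapshot->xmin);    /* illustrative field */
    appendStringInfo(&buf, "xmax:%u\n", snapshot->xmax);    /* illustrative field */
    appendStringInfo(&buf, "xip:%u\n", topXid);             /* as in the hunk above */

Keeping the format this rigid is what lets ImportSnapshot parse the file with simple fieldname:value scanning when SET TRANSACTION SNAPSHOT 'foo' is executed.
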
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 01f73980aff..4caef9ca25e 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -1226,7 +1226,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
* in lieu of HeapTupleSatisifesVacuum when the tuple has just been
* tested by HeapTupleSatisfiesMVCC and, therefore, any hint bits that
* can be set should already be set. We assume that if no hint bits
- * either for xmin or xmax, the transaction is still running. This is
+ * either for xmin or xmax, the transaction is still running. This is
* therefore faster than HeapTupleSatisfiesVacuum, because we don't
* consult CLOG (and also because we don't need to give an exact answer,
* just whether or not the tuple is surely dead).
@@ -1235,10 +1235,10 @@ bool
HeapTupleIsSurelyDead(HeapTupleHeader tuple, TransactionId OldestXmin)
{
/*
- * If the inserting transaction is marked invalid, then it aborted,
- * and the tuple is definitely dead. If it's marked neither committed
- * nor invalid, then we assume it's still alive (since the presumption
- * is that all relevant hint bits were just set moments ago).
+ * If the inserting transaction is marked invalid, then it aborted, and
+ * the tuple is definitely dead. If it's marked neither committed nor
+ * invalid, then we assume it's still alive (since the presumption is that
+ * all relevant hint bits were just set moments ago).
*/
if (!(tuple->t_infomask & HEAP_XMIN_COMMITTED))
return (tuple->t_infomask & HEAP_XMIN_INVALID) != 0 ? true : false;