author     Bruce Momjian   2001-10-25 05:50:21 +0000
committer  Bruce Momjian   2001-10-25 05:50:21 +0000
commit     b81844b1738c584d92330a5ccd0fbd8b603d2886 (patch)
tree       4fae0d4cd26048177fc5cd1a2dd91abc99ba0f99 /src/backend/access
parent     59da2105d8e6d95345b3b942a2e2aba8cead4838 (diff)
pgindent run on all C files. Java run to follow. initdb/regression
tests pass.
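
The diff below is purely mechanical reformatting: pgindent reflows comment
blocks, splits multi-variable declarations onto one line per name, moves
braces onto their own lines, and normalizes whitespace around operators,
casts, and keywords. As a rough before/after sketch of those conventions
(hypothetical code invented for illustration, not taken from this commit):

/*
 * Hypothetical sketch of the rewrite pgindent performs; the function and
 * its name are invented, and tabs are shown as spaces here.
 */
#include <stdbool.h>

/* before: the compact style seen on the '-' lines of this diff */
static int count_nonnull_before(int len, bool *isnull) {
    int i, n=0;
    for(i=0; i<len; i++) {
        if ( isnull[i] ) continue;
        n++;
    }
    return n;
}

/* after: the layout pgindent enforces on the '+' lines */
static int
count_nonnull_after(int len, bool *isnull)
{
    int         i,
                n = 0;

    for (i = 0; i < len; i++)
    {
        if (isnull[i])
            continue;
        n++;
    }
    return n;
}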
Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/common/heaptuple.c   |    9
-rw-r--r--  src/backend/access/common/indextuple.c  |    4
-rw-r--r--  src/backend/access/common/printtup.c    |    5
-rw-r--r--  src/backend/access/common/tupdesc.c     |   10
-rw-r--r--  src/backend/access/gist/gist.c          |  959
-rw-r--r--  src/backend/access/gist/gistget.c       |   10
-rw-r--r--  src/backend/access/gist/gistscan.c      |   13
-rw-r--r--  src/backend/access/gist/giststrat.c     |    3
-rw-r--r--  src/backend/access/hash/hash.c          |   35
-rw-r--r--  src/backend/access/hash/hashinsert.c    |    6
-rw-r--r--  src/backend/access/hash/hashovfl.c      |    3
-rw-r--r--  src/backend/access/hash/hashpage.c      |   16
-rw-r--r--  src/backend/access/hash/hashsearch.c    |    3
-rw-r--r--  src/backend/access/hash/hashstrat.c     |    5
-rw-r--r--  src/backend/access/heap/heapam.c        |   85
-rw-r--r--  src/backend/access/heap/hio.c           |   61
-rw-r--r--  src/backend/access/heap/stats.c         |    8
-rw-r--r--  src/backend/access/heap/tuptoaster.c    |   40
-rw-r--r--  src/backend/access/index/genam.c        |    3
-rw-r--r--  src/backend/access/index/indexam.c      |   16
-rw-r--r--  src/backend/access/index/istrat.c       |   20
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c  |    4
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c   |    8
-rw-r--r--  src/backend/access/nbtree/nbtpage.c     |    6
-rw-r--r--  src/backend/access/nbtree/nbtree.c      |   64
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c   |   10
-rw-r--r--  src/backend/access/nbtree/nbtsort.c     |    6
-rw-r--r--  src/backend/access/nbtree/nbtstrat.c    |    3
-rw-r--r--  src/backend/access/nbtree/nbtutils.c    |    4
-rw-r--r--  src/backend/access/rtree/rtree.c        |  167
-rw-r--r--  src/backend/access/rtree/rtscan.c       |    3
-rw-r--r--  src/backend/access/rtree/rtstrat.c      |    5
-rw-r--r--  src/backend/access/transam/clog.c       |  140
-rw-r--r--  src/backend/access/transam/transam.c    |   38
-rw-r--r--  src/backend/access/transam/varsup.c     |   50
-rw-r--r--  src/backend/access/transam/xact.c       |   40
-rw-r--r--  src/backend/access/transam/xid.c        |    5
-rw-r--r--  src/backend/access/transam/xlog.c       |   94
-rw-r--r--  src/backend/access/transam/xlogutils.c  |    6
39 files changed, 1008 insertions(+), 959 deletions(-)
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index ef6cedd92d2..5b005b3163c 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.73 2001/08/23 23:06:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.74 2001/10/25 05:49:20 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -241,7 +241,6 @@ nocachegetattr(HeapTuple tuple,
}
else
{
-
/*
* there's a null somewhere in the tuple
*/
@@ -347,7 +346,6 @@ nocachegetattr(HeapTuple tuple,
(HeapTupleNoNulls(tuple) || !att_isnull(j, bp)) &&
(HeapTupleAllFixed(tuple) || att[j]->attlen > 0)); j++)
{
-
/*
* Fix me when going to a machine with more than a four-byte
* word!
@@ -546,7 +544,6 @@ heap_deformtuple(HeapTuple tuple,
nulls[i] = ' ';
}
}
-
#endif
/* ----------------
@@ -739,7 +736,7 @@ heap_freetuple(HeapTuple htup)
*
* This routine forms a HeapTuple by copying the given structure (tuple
* data) and adding a generic header. Note that the tuple data is
- * presumed to contain no null fields. It is typically only useful
+ * presumed to contain no null fields. It is typically only useful
* for null-free system tables.
* ----------------
*/
@@ -771,7 +768,7 @@ heap_addheader(int natts, /* max domain index */
td->t_hoff = hoff;
td->t_natts = natts;
- td->t_infomask = HEAP_XMAX_INVALID; /* XXX sufficient? */
+ td->t_infomask = HEAP_XMAX_INVALID; /* XXX sufficient? */
memcpy((char *) td + hoff, structure, structlen);
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 5c165e2c779..ad6e7cc4e5b 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.54 2001/03/22 06:16:06 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.55 2001/10/25 05:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,7 +49,6 @@ index_formtuple(TupleDesc tupleDescriptor,
#ifdef TOAST_INDEX_HACK
Datum untoasted_value[INDEX_MAX_KEYS];
bool untoasted_free[INDEX_MAX_KEYS];
-
#endif
if (numberOfAttributes > INDEX_MAX_KEYS)
@@ -338,7 +337,6 @@ nocache_index_getattr(IndexTuple tup,
for (; j <= attnum; j++)
{
-
/*
* Fix me when going to a machine with more than a four-byte
* word!
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index da95edfc1fe..ea10330bf87 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.59 2001/03/22 06:16:06 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.60 2001/10/25 05:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -197,7 +197,6 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue;
if (OidIsValid(thisState->typoutput))
{
-
/*
* If we have a toasted datum, forcibly detoast it here to
* avoid memory leakage inside the type's output routine.
@@ -306,7 +305,6 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena))
{
-
/*
* If we have a toasted datum, forcibly detoast it here to
* avoid memory leakage inside the type's output routine.
@@ -401,7 +399,6 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
/* send # of bytes, and opaque data */
if (thisState->typisvarlena)
{
-
/*
* If we have a toasted datum, must detoast before sending.
*/
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 37e895479e2..4b9d53df347 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.75 2001/06/25 21:11:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.76 2001/10/25 05:49:20 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -238,9 +238,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
Form_pg_attribute attr2 = tupdesc2->attrs[i];
/*
- * We do not need to check every single field here: we can disregard
- * attrelid, attnum (it was used to place the row in the attrs array)
- * and everything derived from the column datatype.
+ * We do not need to check every single field here: we can
+ * disregard attrelid, attnum (it was used to place the row in the
+ * attrs array) and everything derived from the column datatype.
*/
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
return false;
@@ -399,7 +399,6 @@ TupleDescInitEntry(TupleDesc desc,
0, 0, 0);
if (!HeapTupleIsValid(tuple))
{
-
/*
* here type info does not exist yet so we just fill the attribute
* with dummy information and return false.
@@ -585,7 +584,6 @@ BuildDescForRelation(List *schema, char *relname)
typenameTypeId(typename),
atttypmod, attdim, attisset))
{
-
/*
* if TupleDescInitEntry() fails, it means there is no type in
* the system catalogs. So now we check if the type name
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 4a702fe3174..0c4e698f016 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.84 2001/10/06 23:21:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.85 2001/10/25 05:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,26 +36,26 @@
/* group flags ( in gistSplit ) */
#define LEFT_ADDED 0x01
-#define RIGHT_ADDED 0x02
+#define RIGHT_ADDED 0x02
#define BOTH_ADDED ( LEFT_ADDED | RIGHT_ADDED )
/*
* This defines only for shorter code, used in gistgetadjusted
* and gistadjsubkey only
*/
-#define FILLITEM(evp, isnullkey, okey, okeyb, rkey, rkeyb) do { \
- if ( isnullkey ) { \
- gistentryinit((evp), rkey, r, (Page) NULL , \
- (OffsetNumber) 0, rkeyb, FALSE); \
- } else { \
- gistentryinit((evp), okey, r, (Page) NULL, \
- (OffsetNumber) 0, okeyb, FALSE); \
- } \
+#define FILLITEM(evp, isnullkey, okey, okeyb, rkey, rkeyb) do { \
+ if ( isnullkey ) { \
+ gistentryinit((evp), rkey, r, (Page) NULL , \
+ (OffsetNumber) 0, rkeyb, FALSE); \
+ } else { \
+ gistentryinit((evp), okey, r, (Page) NULL, \
+ (OffsetNumber) 0, okeyb, FALSE); \
+ } \
} while(0)
#define FILLEV(isnull1, key1, key1b, isnull2, key2, key2b) do { \
- FILLITEM(*ev0p, isnull1, key1, key1b, key2, key2b); \
- FILLITEM(*ev1p, isnull2, key2, key2b, key1, key1b); \
+ FILLITEM(*ev0p, isnull1, key1, key1b, key2, key2b); \
+ FILLITEM(*ev1p, isnull2, key2, key2b, key1, key1b); \
} while(0);
/* Working state for gistbuild and its callback */
@@ -69,11 +69,11 @@ typedef struct
/* non-export function prototypes */
static void gistbuildCallback(Relation index,
- HeapTuple htup,
- Datum *attdata,
- char *nulls,
- bool tupleIsAlive,
- void *state);
+ HeapTuple htup,
+ Datum *attdata,
+ char *nulls,
+ bool tupleIsAlive,
+ void *state);
static void gistdoinsert(Relation r,
IndexTuple itup,
InsertIndexResult *res,
@@ -103,14 +103,14 @@ static IndexTuple gistgetadjusted(Relation r,
IndexTuple oldtup,
IndexTuple addtup,
GISTSTATE *giststate);
-static int gistfindgroup( GISTSTATE *giststate,
- GISTENTRY *valvec, GIST_SPLITVEC * spl );
+static int gistfindgroup(GISTSTATE *giststate,
+ GISTENTRY *valvec, GIST_SPLITVEC *spl);
static void gistadjsubkey(Relation r,
- IndexTuple *itup, int *len,
- GIST_SPLITVEC *v,
- GISTSTATE *giststate);
-static IndexTuple gistFormTuple( GISTSTATE *giststate,
- Relation r, Datum attdata[], int datumsize[], bool isnull[] );
+ IndexTuple *itup, int *len,
+ GIST_SPLITVEC *v,
+ GISTSTATE *giststate);
+static IndexTuple gistFormTuple(GISTSTATE *giststate,
+ Relation r, Datum attdata[], int datumsize[], bool isnull[]);
static IndexTuple *gistSplit(Relation r,
Buffer buffer,
IndexTuple *itup,
@@ -124,6 +124,7 @@ static OffsetNumber gistchoose(Relation r, Page p,
IndexTuple it,
GISTSTATE *giststate);
static void gistdelete(Relation r, ItemPointer tid);
+
#ifdef GIST_PAGEADDITEM
static IndexTuple gist_tuple_replacekey(Relation r,
GISTENTRY entry, IndexTuple t);
@@ -132,14 +133,15 @@ static void gistcentryinit(GISTSTATE *giststate, int nkey,
GISTENTRY *e, Datum k,
Relation r, Page pg,
OffsetNumber o, int b, bool l, bool isNull);
-static void gistDeCompressAtt( GISTSTATE *giststate, Relation r,
- IndexTuple tuple, Page p, OffsetNumber o,
- GISTENTRY attdata[], bool decompvec[], bool isnull[] );
-static void gistFreeAtt( Relation r, GISTENTRY attdata[], bool decompvec[] );
-static void gistpenalty( GISTSTATE *giststate, int attno,
+static void gistDeCompressAtt(GISTSTATE *giststate, Relation r,
+ IndexTuple tuple, Page p, OffsetNumber o,
+ GISTENTRY attdata[], bool decompvec[], bool isnull[]);
+static void gistFreeAtt(Relation r, GISTENTRY attdata[], bool decompvec[]);
+static void gistpenalty(GISTSTATE *giststate, int attno,
GISTENTRY *key1, bool isNull1,
- GISTENTRY *key2, bool isNull2,
- float *penalty );
+ GISTENTRY *key2, bool isNull2,
+ float *penalty);
+
#undef GISTDEBUG
#ifdef GISTDEBUG
@@ -182,7 +184,7 @@ gistbuild(PG_FUNCTION_ARGS)
/* do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo,
- gistbuildCallback, (void *) &buildstate);
+ gistbuildCallback, (void *) &buildstate);
/* okay, all heap tuples are indexed */
@@ -208,7 +210,7 @@ gistbuild(PG_FUNCTION_ARGS)
UpdateStats(irelid, buildstate.indtuples);
}
- freeGISTstate( &buildstate.giststate );
+ freeGISTstate(&buildstate.giststate);
#ifdef GISTDEBUG
gist_dumptree(index, 0, GISTP_ROOT, 0);
#endif
@@ -227,28 +229,31 @@ gistbuildCallback(Relation index,
bool tupleIsAlive,
void *state)
{
- GISTBuildState *buildstate = (GISTBuildState *) state;
+ GISTBuildState *buildstate = (GISTBuildState *) state;
IndexTuple itup;
bool compvec[INDEX_MAX_KEYS];
GISTENTRY tmpcentry;
int i;
- /* GiST cannot index tuples with leading NULLs */
- if ( nulls[0] == 'n' )
- return;
+ /* GiST cannot index tuples with leading NULLs */
+ if (nulls[0] == 'n')
+ return;
/* immediately compress keys to normalize */
for (i = 0; i < buildstate->numindexattrs; i++)
{
- if ( nulls[i]=='n' ) {
+ if (nulls[i] == 'n')
+ {
attdata[i] = (Datum) 0;
compvec[i] = FALSE;
- } else {
+ }
+ else
+ {
gistcentryinit(&buildstate->giststate, i, &tmpcentry, attdata[i],
- (Relation) NULL, (Page) NULL, (OffsetNumber) 0,
- -1 /* size is currently bogus */ , TRUE, FALSE);
+ (Relation) NULL, (Page) NULL, (OffsetNumber) 0,
+ -1 /* size is currently bogus */ , TRUE, FALSE);
if (attdata[i] != tmpcentry.key &&
- !( isAttByVal(&buildstate->giststate, i)))
+ !(isAttByVal(&buildstate->giststate, i)))
compvec[i] = TRUE;
else
compvec[i] = FALSE;
@@ -262,10 +267,10 @@ gistbuildCallback(Relation index,
/*
* Since we already have the index relation locked, we call
- * gistdoinsert directly. Normal access method calls dispatch
- * through gistinsert, which locks the relation for write. This
- * is the right thing to do if you're inserting single tups, but
- * not when you're initializing the whole index at once.
+ * gistdoinsert directly. Normal access method calls dispatch through
+ * gistinsert, which locks the relation for write. This is the right
+ * thing to do if you're inserting single tups, but not when you're
+ * initializing the whole index at once.
*/
gistdoinsert(index, itup, NULL, &buildstate->giststate);
@@ -291,6 +296,7 @@ gistinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif
@@ -303,29 +309,33 @@ gistinsert(PG_FUNCTION_ARGS)
/*
* Since GIST is not marked "amconcurrent" in pg_am, caller should
- * have acquired exclusive lock on index relation. We need no locking
+ * have acquired exclusive lock on index relation. We need no locking
* here.
*/
- /* GiST cannot index tuples with leading NULLs */
- if ( nulls[0] == 'n' ) {
+ /* GiST cannot index tuples with leading NULLs */
+ if (nulls[0] == 'n')
+ {
res = NULL;
PG_RETURN_POINTER(res);
- }
+ }
initGISTstate(&giststate, r);
/* immediately compress keys to normalize */
for (i = 0; i < r->rd_att->natts; i++)
{
- if ( nulls[i]=='n' ) {
+ if (nulls[i] == 'n')
+ {
datum[i] = (Datum) 0;
compvec[i] = FALSE;
- } else {
+ }
+ else
+ {
gistcentryinit(&giststate, i, &tmpentry, datum[i],
- (Relation) NULL, (Page) NULL, (OffsetNumber) 0,
- -1 /* size is currently bogus */ , TRUE, FALSE );
- if (datum[i] != tmpentry.key && !( isAttByVal( &giststate, i)))
+ (Relation) NULL, (Page) NULL, (OffsetNumber) 0,
+ -1 /* size is currently bogus */ , TRUE, FALSE);
+ if (datum[i] != tmpentry.key && !(isAttByVal(&giststate, i)))
compvec[i] = TRUE;
else
compvec[i] = FALSE;
@@ -342,7 +352,7 @@ gistinsert(PG_FUNCTION_ARGS)
if (compvec[i] == TRUE)
pfree(DatumGetPointer(datum[i]));
pfree(itup);
- freeGISTstate( &giststate );
+ freeGISTstate(&giststate);
PG_RETURN_POINTER(res);
}
@@ -376,12 +386,12 @@ gistPageAddItem(GISTSTATE *giststate,
* offset for insertion
*/
datum = index_getattr(itup, 1, r->rd_att, &IsNull);
- gistdentryinit(giststate, 0,dentry, datum,
+ gistdentryinit(giststate, 0, dentry, datum,
(Relation) 0, (Page) 0,
(OffsetNumber) InvalidOffsetNumber,
- ATTSIZE( datum, r, 1, IsNull ),
+ ATTSIZE(datum, r, 1, IsNull),
FALSE, IsNull);
- gistcentryinit(giststate, 0,&tmpcentry, dentry->key, r, page,
+ gistcentryinit(giststate, 0, &tmpcentry, dentry->key, r, page,
offsetNumber, dentry->bytes, FALSE);
*newtup = gist_tuple_replacekey(r, tmpcentry, itup);
retval = PageAddItem(page, (Item) *newtup, IndexTupleSize(*newtup),
@@ -392,7 +402,7 @@ gistPageAddItem(GISTSTATE *giststate,
/* be tidy */
if (DatumGetPointer(tmpcentry.key) != NULL &&
tmpcentry.key != dentry->key &&
- tmpcentry.key != datum )
+ tmpcentry.key != datum)
pfree(DatumGetPointer(tmpcentry.key));
return (retval);
}
@@ -471,6 +481,7 @@ gistlayerinsert(Relation r, BlockNumber blkno,
if (!(ret & SPLITED))
{
IndexTuple newtup = gistgetadjusted(r, oldtup, (*itup)[0], giststate);
+
if (!newtup)
{
/* not need to update key */
@@ -492,8 +503,10 @@ gistlayerinsert(Relation r, BlockNumber blkno,
if (gistnospace(page, (*itup), *len))
{
/* no space for insertion */
- IndexTuple *itvec, *newitup;
- int tlen,oldlen;
+ IndexTuple *itvec,
+ *newitup;
+ int tlen,
+ oldlen;
ret |= SPLITED;
itvec = gistreadbuffer(r, buffer, &tlen);
@@ -503,9 +516,9 @@ gistlayerinsert(Relation r, BlockNumber blkno,
(opaque->flags & F_LEAF) ? res : NULL); /* res only for
* inserting in leaf */
ReleaseBuffer(buffer);
- do
- pfree( (*itup)[ oldlen-1 ] );
- while ( (--oldlen) > 0 );
+ do
+ pfree((*itup)[oldlen - 1]);
+ while ((--oldlen) > 0);
pfree((*itup));
pfree(itvec);
*itup = newitup;
@@ -539,7 +552,8 @@ gistlayerinsert(Relation r, BlockNumber blkno,
* parent
*/
IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
- ItemPointerSet(&(newtup->t_tid), blkno, 1);
+
+ ItemPointerSet(&(newtup->t_tid), blkno, 1);
for (i = 0; i < *len; i++)
pfree((*itup)[i]);
@@ -560,10 +574,11 @@ gistwritebuffer(Relation r, Page page, IndexTuple *itup,
{
OffsetNumber l = InvalidOffsetNumber;
int i;
+
#ifdef GIST_PAGEADDITEM
GISTENTRY tmpdentry;
IndexTuple newtup;
- bool IsNull;
+ bool IsNull;
#endif
for (i = 0; i < len; i++)
{
@@ -573,13 +588,13 @@ gistwritebuffer(Relation r, Page page, IndexTuple *itup,
off, LP_USED, &tmpdentry, &newtup);
off = OffsetNumberNext(off);
if (DatumGetPointer(tmpdentry.key) != NULL &&
- tmpdentry.key != index_getattr(itup[i], 1, r->rd_att, &IsNull))
+ tmpdentry.key != index_getattr(itup[i], 1, r->rd_att, &IsNull))
pfree(DatumGetPointer(tmpdentry.key));
if (itup[i] != newtup)
pfree(newtup);
#else
l = PageAddItem(page, (Item) itup[i], IndexTupleSize(itup[i]),
- off, LP_USED);
+ off, LP_USED);
if (l == InvalidOffsetNumber)
elog(ERROR, "gist: failed to add index item to %s",
RelationGetRelationName(r));
@@ -618,7 +633,7 @@ gistreadbuffer(Relation r, Buffer buffer, int *len /* out */ )
*len = maxoff;
itvec = palloc(sizeof(IndexTuple) * maxoff);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
- itvec[i-1] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
+ itvec[i - 1] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
return itvec;
}
@@ -639,74 +654,85 @@ gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
* return union of itup vector
*/
static IndexTuple
-gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate) {
+gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
+{
Datum attr[INDEX_MAX_KEYS];
- bool whatfree[INDEX_MAX_KEYS];
+ bool whatfree[INDEX_MAX_KEYS];
char isnull[INDEX_MAX_KEYS];
bytea *evec;
Datum datum;
int datumsize,
- i,j;
+ i,
+ j;
GISTENTRY centry[INDEX_MAX_KEYS];
- bool *needfree;
- IndexTuple newtup;
- bool IsNull;
- int reallen;
+ bool *needfree;
+ IndexTuple newtup;
+ bool IsNull;
+ int reallen;
- needfree = (bool *) palloc((( len==1 ) ? 2 : len ) * sizeof(bool));
- evec = (bytea *) palloc((( len==1 ) ? 2 : len ) * sizeof(GISTENTRY) + VARHDRSZ);
+ needfree = (bool *) palloc(((len == 1) ? 2 : len) * sizeof(bool));
+ evec = (bytea *) palloc(((len == 1) ? 2 : len) * sizeof(GISTENTRY) + VARHDRSZ);
- for (j = 0; j < r->rd_att->natts; j++) {
- reallen=0;
- for (i = 0; i < len; i++) {
- datum = index_getattr(itvec[i], j+1, giststate->tupdesc, &IsNull);
- if ( IsNull ) continue;
+ for (j = 0; j < r->rd_att->natts; j++)
+ {
+ reallen = 0;
+ for (i = 0; i < len; i++)
+ {
+ datum = index_getattr(itvec[i], j + 1, giststate->tupdesc, &IsNull);
+ if (IsNull)
+ continue;
gistdentryinit(giststate, j,
&((GISTENTRY *) VARDATA(evec))[reallen],
datum,
- (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
- ATTSIZE( datum, giststate->tupdesc, j+1, IsNull ), FALSE, IsNull);
- if ( (!isAttByVal(giststate,j)) &&
- ((GISTENTRY *) VARDATA(evec))[reallen].key != datum )
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ ATTSIZE(datum, giststate->tupdesc, j + 1, IsNull), FALSE, IsNull);
+ if ((!isAttByVal(giststate, j)) &&
+ ((GISTENTRY *) VARDATA(evec))[reallen].key != datum)
needfree[reallen] = TRUE;
else
needfree[reallen] = FALSE;
reallen++;
}
- if ( reallen == 0 ) {
+ if (reallen == 0)
+ {
attr[j] = (Datum) 0;
isnull[j] = 'n';
whatfree[j] = FALSE;
- } else {
- if ( reallen == 1 ) {
+ }
+ else
+ {
+ if (reallen == 1)
+ {
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
- gistentryinit(((GISTENTRY *) VARDATA(evec))[1],
- ((GISTENTRY *) VARDATA(evec))[0].key, r, (Page) NULL,
- (OffsetNumber) 0, ((GISTENTRY *) VARDATA(evec))[0].bytes, FALSE);
+ gistentryinit(((GISTENTRY *) VARDATA(evec))[1],
+ ((GISTENTRY *) VARDATA(evec))[0].key, r, (Page) NULL,
+ (OffsetNumber) 0, ((GISTENTRY *) VARDATA(evec))[0].bytes, FALSE);
- } else {
- VARATT_SIZEP(evec) = reallen * sizeof(GISTENTRY) + VARHDRSZ;
}
+ else
+ VARATT_SIZEP(evec) = reallen * sizeof(GISTENTRY) + VARHDRSZ;
datum = FunctionCall2(&giststate->unionFn[j],
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize));
-
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize));
+
for (i = 0; i < reallen; i++)
- if ( needfree[i] )
+ if (needfree[i])
pfree(DatumGetPointer(((GISTENTRY *) VARDATA(evec))[i].key));
gistcentryinit(giststate, j, &centry[j], datum,
- (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
- datumsize, FALSE, FALSE);
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ datumsize, FALSE, FALSE);
isnull[j] = ' ';
- attr[j] = centry[j].key;
- if ( !isAttByVal( giststate, j ) ) {
+ attr[j] = centry[j].key;
+ if (!isAttByVal(giststate, j))
+ {
whatfree[j] = TRUE;
- if ( centry[j].key != datum )
+ if (centry[j].key != datum)
pfree(DatumGetPointer(datum));
- } else
+ }
+ else
whatfree[j] = FALSE;
}
}
@@ -716,7 +742,7 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate) {
newtup = (IndexTuple) index_formtuple(giststate->tupdesc, attr, isnull);
for (j = 0; j < r->rd_att->natts; j++)
- if ( whatfree[j] )
+ if (whatfree[j])
pfree(DatumGetPointer(attr[j]));
return newtup;
@@ -732,7 +758,8 @@ gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *gis
bytea *evec;
Datum datum;
int datumsize;
- bool result, neednew = false;
+ bool result,
+ neednew = false;
char isnull[INDEX_MAX_KEYS],
whatfree[INDEX_MAX_KEYS];
Datum attr[INDEX_MAX_KEYS];
@@ -744,104 +771,125 @@ gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *gis
bool olddec[INDEX_MAX_KEYS],
adddec[INDEX_MAX_KEYS];
bool oldisnull[INDEX_MAX_KEYS],
- addisnull[INDEX_MAX_KEYS];
+ addisnull[INDEX_MAX_KEYS];
IndexTuple newtup = NULL;
- int j;
+ int j;
evec = (bytea *) palloc(2 * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
ev0p = &((GISTENTRY *) VARDATA(evec))[0];
ev1p = &((GISTENTRY *) VARDATA(evec))[1];
-
- gistDeCompressAtt( giststate, r, oldtup, (Page) NULL,
- (OffsetNumber) 0, oldatt, olddec, oldisnull);
- gistDeCompressAtt( giststate, r, addtup, (Page) NULL,
- (OffsetNumber) 0, addatt, adddec, addisnull);
+ gistDeCompressAtt(giststate, r, oldtup, (Page) NULL,
+ (OffsetNumber) 0, oldatt, olddec, oldisnull);
+
+ gistDeCompressAtt(giststate, r, addtup, (Page) NULL,
+ (OffsetNumber) 0, addatt, adddec, addisnull);
+
-
- for( j=0; j<r->rd_att->natts; j++ ) {
- if ( oldisnull[j] && addisnull[j] ) {
+ for (j = 0; j < r->rd_att->natts; j++)
+ {
+ if (oldisnull[j] && addisnull[j])
+ {
isnull[j] = 'n';
attr[j] = (Datum) 0;
whatfree[j] = FALSE;
- } else {
+ }
+ else
+ {
FILLEV(
- oldisnull[j], oldatt[j].key, oldatt[j].bytes,
- addisnull[j], addatt[j].key, addatt[j].bytes
- );
-
+ oldisnull[j], oldatt[j].key, oldatt[j].bytes,
+ addisnull[j], addatt[j].key, addatt[j].bytes
+ );
+
datum = FunctionCall2(&giststate->unionFn[j],
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize));
- if ( oldisnull[j] || addisnull[j] ) {
- if ( oldisnull[j] )
+ if (oldisnull[j] || addisnull[j])
+ {
+ if (oldisnull[j])
neednew = true;
- } else {
+ }
+ else
+ {
FunctionCall3(&giststate->equalFn[j],
- ev0p->key,
- datum,
- PointerGetDatum(&result));
-
- if ( !result )
+ ev0p->key,
+ datum,
+ PointerGetDatum(&result));
+
+ if (!result)
neednew = true;
}
- if ( olddec[j] ) pfree( DatumGetPointer(oldatt[j].key) );
- if ( adddec[j] ) pfree( DatumGetPointer(addatt[j].key) );
+ if (olddec[j])
+ pfree(DatumGetPointer(oldatt[j].key));
+ if (adddec[j])
+ pfree(DatumGetPointer(addatt[j].key));
gistcentryinit(giststate, j, &centry[j], datum,
- (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
- datumsize, FALSE, FALSE);
-
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ datumsize, FALSE, FALSE);
+
isnull[j] = ' ';
- attr[j] = centry[j].key;
- if ( (!isAttByVal( giststate, j ) ) ) {
+ attr[j] = centry[j].key;
+ if ((!isAttByVal(giststate, j)))
+ {
whatfree[j] = TRUE;
- if ( centry[j].key != datum )
+ if (centry[j].key != datum)
pfree(DatumGetPointer(datum));
- } else
+ }
+ else
whatfree[j] = FALSE;
}
- }
+ }
pfree(evec);
- if (neednew) {
+ if (neednew)
+ {
/* need to update key */
newtup = (IndexTuple) index_formtuple(giststate->tupdesc, attr, isnull);
newtup->t_tid = oldtup->t_tid;
}
-
+
for (j = 0; j < r->rd_att->natts; j++)
- if ( whatfree[j] )
+ if (whatfree[j])
pfree(DatumGetPointer(attr[j]));
return newtup;
}
static void
-gistunionsubkey( Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITVEC * spl ) {
- int i,j,lr;
+gistunionsubkey(Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITVEC *spl)
+{
+ int i,
+ j,
+ lr;
Datum *attr;
- bool *needfree, IsNull;
- int len, *attrsize;
- OffsetNumber *entries;
- bytea *evec;
+ bool *needfree,
+ IsNull;
+ int len,
+ *attrsize;
+ OffsetNumber *entries;
+ bytea *evec;
Datum datum;
- int datumsize;
- int reallen;
- bool *isnull;
+ int datumsize;
+ int reallen;
+ bool *isnull;
- for(lr=0;lr<=1;lr++) {
- if ( lr ) {
+ for (lr = 0; lr <= 1; lr++)
+ {
+ if (lr)
+ {
attrsize = spl->spl_lattrsize;
attr = spl->spl_lattr;
len = spl->spl_nleft;
entries = spl->spl_left;
isnull = spl->spl_lisnull;
- } else {
+ }
+ else
+ {
attrsize = spl->spl_rattrsize;
attr = spl->spl_rattr;
len = spl->spl_nright;
@@ -849,58 +897,64 @@ gistunionsubkey( Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLIT
isnull = spl->spl_risnull;
}
- needfree = (bool *) palloc( (( len==1 ) ? 2 : len ) * sizeof(bool));
- evec = (bytea *) palloc( (( len==1 ) ? 2 : len ) * sizeof(GISTENTRY) + VARHDRSZ);
- for (j = 1; j < r->rd_att->natts; j++) {
+ needfree = (bool *) palloc(((len == 1) ? 2 : len) * sizeof(bool));
+ evec = (bytea *) palloc(((len == 1) ? 2 : len) * sizeof(GISTENTRY) + VARHDRSZ);
+ for (j = 1; j < r->rd_att->natts; j++)
+ {
reallen = 0;
- for (i = 0; i < len; i++) {
- if ( spl->spl_idgrp[ entries[i] ] )
+ for (i = 0; i < len; i++)
+ {
+ if (spl->spl_idgrp[entries[i]])
continue;
- datum = index_getattr(itvec[ entries[i]-1 ], j+1,
+ datum = index_getattr(itvec[entries[i] - 1], j + 1,
giststate->tupdesc, &IsNull);
- if ( IsNull )
+ if (IsNull)
continue;
gistdentryinit(giststate, j,
&((GISTENTRY *) VARDATA(evec))[reallen],
datum,
(Relation) NULL, (Page) NULL,
(OffsetNumber) NULL,
- ATTSIZE( datum, giststate->tupdesc, j+1, IsNull ), FALSE, IsNull);
- if ( (!isAttByVal( giststate, j )) &&
- ((GISTENTRY *) VARDATA(evec))[reallen].key != datum )
+ ATTSIZE(datum, giststate->tupdesc, j + 1, IsNull), FALSE, IsNull);
+ if ((!isAttByVal(giststate, j)) &&
+ ((GISTENTRY *) VARDATA(evec))[reallen].key != datum)
needfree[reallen] = TRUE;
else
needfree[reallen] = FALSE;
reallen++;
-
- }
- if ( reallen == 0 )
+
+ }
+ if (reallen == 0)
{
datum = (Datum) 0;
datumsize = 0;
isnull[j] = true;
- } else {
+ }
+ else
+ {
/*
- * ((GISTENTRY *) VARDATA(evec))[0].bytes may be not defined,
- * so form union with itself
+ * ((GISTENTRY *) VARDATA(evec))[0].bytes may be not
+ * defined, so form union with itself
*/
- if ( reallen == 1 ) {
+ if (reallen == 1)
+ {
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
- memcpy( (void*) &((GISTENTRY *) VARDATA(evec))[1],
- (void*) &((GISTENTRY *) VARDATA(evec))[0],
- sizeof( GISTENTRY ) );
- } else
+ memcpy((void *) &((GISTENTRY *) VARDATA(evec))[1],
+ (void *) &((GISTENTRY *) VARDATA(evec))[0],
+ sizeof(GISTENTRY));
+ }
+ else
VARATT_SIZEP(evec) = reallen * sizeof(GISTENTRY) + VARHDRSZ;
datum = FunctionCall2(&giststate->unionFn[j],
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize));
isnull[j] = false;
- }
+ }
for (i = 0; i < reallen; i++)
- if ( needfree[i] )
+ if (needfree[i])
pfree(DatumGetPointer(((GISTENTRY *) VARDATA(evec))[i].key));
-
+
attr[j] = datum;
attrsize[j] = datumsize;
}
@@ -910,51 +964,63 @@ gistunionsubkey( Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLIT
}
/*
- * find group in vector with equial value
+ * find group in vector with equial value
*/
static int
-gistfindgroup( GISTSTATE *giststate, GISTENTRY *valvec, GIST_SPLITVEC * spl ) {
- int i,j,len;
- int curid = 1;
- bool result;
-
- /*
- * first key is always not null (see gistinsert),
- * so we may not check for nulls
- */
-
- for(i=0; i<spl->spl_nleft; i++) {
- if ( spl->spl_idgrp[ spl->spl_left[i] ]) continue;
+gistfindgroup(GISTSTATE *giststate, GISTENTRY *valvec, GIST_SPLITVEC *spl)
+{
+ int i,
+ j,
+ len;
+ int curid = 1;
+ bool result;
+
+ /*
+ * first key is always not null (see gistinsert), so we may not check
+ * for nulls
+ */
+
+ for (i = 0; i < spl->spl_nleft; i++)
+ {
+ if (spl->spl_idgrp[spl->spl_left[i]])
+ continue;
len = 0;
/* find all equal value in right part */
- for(j=0; j < spl->spl_nright; j++) {
- if ( spl->spl_idgrp[ spl->spl_right[j] ]) continue;
+ for (j = 0; j < spl->spl_nright; j++)
+ {
+ if (spl->spl_idgrp[spl->spl_right[j]])
+ continue;
FunctionCall3(&giststate->equalFn[0],
- valvec[ spl->spl_left[i] ].key,
- valvec[ spl->spl_right[j] ].key,
+ valvec[spl->spl_left[i]].key,
+ valvec[spl->spl_right[j]].key,
PointerGetDatum(&result));
- if ( result ) {
- spl->spl_idgrp[ spl->spl_right[j] ] = curid;
+ if (result)
+ {
+ spl->spl_idgrp[spl->spl_right[j]] = curid;
len++;
}
}
/* find all other equal value in left part */
- if ( len ) {
- /* add current val to list of equial values*/
- spl->spl_idgrp[ spl->spl_left[i] ]=curid;
+ if (len)
+ {
+ /* add current val to list of equial values */
+ spl->spl_idgrp[spl->spl_left[i]] = curid;
/* searching .. */
- for(j=i+1; j < spl->spl_nleft; j++) {
- if ( spl->spl_idgrp[ spl->spl_left[j] ]) continue;
+ for (j = i + 1; j < spl->spl_nleft; j++)
+ {
+ if (spl->spl_idgrp[spl->spl_left[j]])
+ continue;
FunctionCall3(&giststate->equalFn[0],
- valvec[ spl->spl_left[i] ].key,
- valvec[ spl->spl_left[j] ].key,
+ valvec[spl->spl_left[i]].key,
+ valvec[spl->spl_left[j]].key,
PointerGetDatum(&result));
- if ( result ) {
- spl->spl_idgrp[ spl->spl_left[j] ] = curid;
+ if (result)
+ {
+ spl->spl_idgrp[spl->spl_left[j]] = curid;
len++;
}
}
- spl->spl_ngrp[curid] = len+1;
+ spl->spl_ngrp[curid] = len + 1;
curid++;
}
}
@@ -968,40 +1034,50 @@ gistfindgroup( GISTSTATE *giststate, GISTENTRY *valvec, GIST_SPLITVEC * spl ) {
*/
static void
gistadjsubkey(Relation r,
- IndexTuple *itup, /* contains compressed entry */
- int *len,
- GIST_SPLITVEC *v,
- GISTSTATE *giststate
- ) {
- int curlen;
- OffsetNumber *curwpos;
- bool decfree[INDEX_MAX_KEYS];
- GISTENTRY entry,identry[INDEX_MAX_KEYS], *ev0p, *ev1p;
- float lpenalty, rpenalty;
- bytea *evec;
- int datumsize;
- bool isnull[INDEX_MAX_KEYS];
- int i,j;
+ IndexTuple *itup, /* contains compressed entry */
+ int *len,
+ GIST_SPLITVEC *v,
+ GISTSTATE *giststate
+)
+{
+ int curlen;
+ OffsetNumber *curwpos;
+ bool decfree[INDEX_MAX_KEYS];
+ GISTENTRY entry,
+ identry[INDEX_MAX_KEYS],
+ *ev0p,
+ *ev1p;
+ float lpenalty,
+ rpenalty;
+ bytea *evec;
+ int datumsize;
+ bool isnull[INDEX_MAX_KEYS];
+ int i,
+ j;
Datum datum;
/* clear vectors */
curlen = v->spl_nleft;
curwpos = v->spl_left;
- for( i=0; i<v->spl_nleft;i++ )
- if ( v->spl_idgrp[ v->spl_left[i] ] == 0 ) {
+ for (i = 0; i < v->spl_nleft; i++)
+ if (v->spl_idgrp[v->spl_left[i]] == 0)
+ {
*curwpos = v->spl_left[i];
curwpos++;
- } else
+ }
+ else
curlen--;
v->spl_nleft = curlen;
curlen = v->spl_nright;
curwpos = v->spl_right;
- for( i=0; i<v->spl_nright;i++ )
- if ( v->spl_idgrp[ v->spl_right[i] ] == 0 ) {
+ for (i = 0; i < v->spl_nright; i++)
+ if (v->spl_idgrp[v->spl_right[i]] == 0)
+ {
*curwpos = v->spl_right[i];
curwpos++;
- } else
+ }
+ else
curlen--;
v->spl_nright = curlen;
@@ -1011,82 +1087,98 @@ gistadjsubkey(Relation r,
ev1p = &((GISTENTRY *) VARDATA(evec))[1];
/* add equivalent tuple */
- for(i = 0; i< *len; i++) {
- if ( v->spl_idgrp[ i+1 ]==0 ) /* already inserted */
+ for (i = 0; i < *len; i++)
+ {
+ if (v->spl_idgrp[i + 1] == 0) /* already inserted */
continue;
- gistDeCompressAtt( giststate, r, itup[i], (Page) NULL, (OffsetNumber) 0,
- identry, decfree, isnull);
+ gistDeCompressAtt(giststate, r, itup[i], (Page) NULL, (OffsetNumber) 0,
+ identry, decfree, isnull);
- v->spl_ngrp[ v->spl_idgrp[ i+1 ] ]--;
- if ( v->spl_ngrp[ v->spl_idgrp[ i+1 ] ] == 0 &&
- (v->spl_grpflag[ v->spl_idgrp[ i+1 ] ] & BOTH_ADDED) != BOTH_ADDED ) {
+ v->spl_ngrp[v->spl_idgrp[i + 1]]--;
+ if (v->spl_ngrp[v->spl_idgrp[i + 1]] == 0 &&
+ (v->spl_grpflag[v->spl_idgrp[i + 1]] & BOTH_ADDED) != BOTH_ADDED)
+ {
/* force last in group */
rpenalty = 1.0;
- lpenalty = ( v->spl_grpflag[ v->spl_idgrp[ i+1 ] ] & LEFT_ADDED ) ? 2.0 : 0.0;
- } else {
- /*where?*/
- for( j=1; j<r->rd_att->natts; j++ ) {
- gistentryinit(entry,v->spl_lattr[j], r, (Page) NULL,
- (OffsetNumber) 0, v->spl_lattrsize[j], FALSE);
- gistpenalty( giststate, j, &entry, v->spl_lisnull[j],
- &identry[j], isnull[j], &lpenalty);
-
- gistentryinit(entry,v->spl_rattr[j], r, (Page) NULL,
- (OffsetNumber) 0, v->spl_rattrsize[j], FALSE);
- gistpenalty( giststate, j, &entry, v->spl_risnull[j],
- &identry[j], isnull[j], &rpenalty);
-
- if ( lpenalty != rpenalty )
+ lpenalty = (v->spl_grpflag[v->spl_idgrp[i + 1]] & LEFT_ADDED) ? 2.0 : 0.0;
+ }
+ else
+ {
+ /* where? */
+ for (j = 1; j < r->rd_att->natts; j++)
+ {
+ gistentryinit(entry, v->spl_lattr[j], r, (Page) NULL,
+ (OffsetNumber) 0, v->spl_lattrsize[j], FALSE);
+ gistpenalty(giststate, j, &entry, v->spl_lisnull[j],
+ &identry[j], isnull[j], &lpenalty);
+
+ gistentryinit(entry, v->spl_rattr[j], r, (Page) NULL,
+ (OffsetNumber) 0, v->spl_rattrsize[j], FALSE);
+ gistpenalty(giststate, j, &entry, v->spl_risnull[j],
+ &identry[j], isnull[j], &rpenalty);
+
+ if (lpenalty != rpenalty)
break;
}
}
/* add */
- if ( lpenalty < rpenalty ) {
- v->spl_grpflag[ v->spl_idgrp[ i+1 ] ] |= LEFT_ADDED;
- v->spl_left[ v->spl_nleft ] = i+1;
+ if (lpenalty < rpenalty)
+ {
+ v->spl_grpflag[v->spl_idgrp[i + 1]] |= LEFT_ADDED;
+ v->spl_left[v->spl_nleft] = i + 1;
v->spl_nleft++;
- for( j=1; j<r->rd_att->natts; j++ ) {
- if ( isnull[j] && v->spl_lisnull[j] ) {
+ for (j = 1; j < r->rd_att->natts; j++)
+ {
+ if (isnull[j] && v->spl_lisnull[j])
+ {
v->spl_lattr[j] = (Datum) 0;
v->spl_lattrsize[j] = 0;
- } else {
+ }
+ else
+ {
FILLEV(
- v->spl_lisnull[j], v->spl_lattr[j], v->spl_lattrsize[j],
- isnull[j], identry[j].key, identry[j].bytes
- );
+ v->spl_lisnull[j], v->spl_lattr[j], v->spl_lattrsize[j],
+ isnull[j], identry[j].key, identry[j].bytes
+ );
datum = FunctionCall2(&giststate->unionFn[j],
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize));
- if ( (!isAttByVal( giststate, j )) && !v->spl_lisnull[j] )
- pfree( DatumGetPointer(v->spl_lattr[j]) );
+ if ((!isAttByVal(giststate, j)) && !v->spl_lisnull[j])
+ pfree(DatumGetPointer(v->spl_lattr[j]));
v->spl_lattr[j] = datum;
v->spl_lattrsize[j] = datumsize;
v->spl_lisnull[j] = false;
}
}
- } else {
- v->spl_grpflag[ v->spl_idgrp[ i+1 ] ] |= RIGHT_ADDED;
- v->spl_right[ v->spl_nright ] = i+1;
+ }
+ else
+ {
+ v->spl_grpflag[v->spl_idgrp[i + 1]] |= RIGHT_ADDED;
+ v->spl_right[v->spl_nright] = i + 1;
v->spl_nright++;
- for( j=1; j<r->rd_att->natts; j++ ) {
- if ( isnull[j] && v->spl_risnull[j] ) {
+ for (j = 1; j < r->rd_att->natts; j++)
+ {
+ if (isnull[j] && v->spl_risnull[j])
+ {
v->spl_rattr[j] = (Datum) 0;
v->spl_rattrsize[j] = 0;
- } else {
+ }
+ else
+ {
FILLEV(
- v->spl_risnull[j], v->spl_rattr[j], v->spl_rattrsize[j],
- isnull[j], identry[j].key, identry[j].bytes
- );
+ v->spl_risnull[j], v->spl_rattr[j], v->spl_rattrsize[j],
+ isnull[j], identry[j].key, identry[j].bytes
+ );
datum = FunctionCall2(&giststate->unionFn[j],
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize));
- if ( (!isAttByVal( giststate, j)) && !v->spl_risnull[j] )
- pfree( DatumGetPointer(v->spl_rattr[j]) );
+ if ((!isAttByVal(giststate, j)) && !v->spl_risnull[j])
+ pfree(DatumGetPointer(v->spl_rattr[j]));
v->spl_rattr[j] = datum;
v->spl_rattrsize[j] = datumsize;
@@ -1094,8 +1186,8 @@ gistadjsubkey(Relation r,
}
}
- }
- gistFreeAtt( r, identry, decfree );
+ }
+ gistFreeAtt(r, identry, decfree);
}
pfree(evec);
}
@@ -1125,11 +1217,12 @@ gistSplit(Relation r,
GIST_SPLITVEC v;
bytea *entryvec;
bool *decompvec;
- int i,j,
+ int i,
+ j,
nlen;
- int MaxGrpId = 1;
+ int MaxGrpId = 1;
Datum datum;
- bool IsNull;
+ bool IsNull;
p = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(p);
@@ -1168,57 +1261,62 @@ gistSplit(Relation r,
for (i = 1; i <= *len; i++)
{
datum = index_getattr(itup[i - 1], 1, giststate->tupdesc, &IsNull);
- gistdentryinit(giststate, 0,&((GISTENTRY *) VARDATA(entryvec))[i],
- datum, r, p, i,
- ATTSIZE( datum, giststate->tupdesc, 1, IsNull ), FALSE, IsNull);
- if ( (!isAttByVal(giststate,0)) && ((GISTENTRY *) VARDATA(entryvec))[i].key != datum )
+ gistdentryinit(giststate, 0, &((GISTENTRY *) VARDATA(entryvec))[i],
+ datum, r, p, i,
+ ATTSIZE(datum, giststate->tupdesc, 1, IsNull), FALSE, IsNull);
+ if ((!isAttByVal(giststate, 0)) && ((GISTENTRY *) VARDATA(entryvec))[i].key != datum)
decompvec[i] = TRUE;
else
decompvec[i] = FALSE;
}
- /* now let the user-defined picksplit function set up the split vector;
- in entryvec have no null value!! */
+ /*
+ * now let the user-defined picksplit function set up the split
+ * vector; in entryvec have no null value!!
+ */
FunctionCall2(&giststate->picksplitFn[0],
PointerGetDatum(entryvec),
PointerGetDatum(&v));
- /* compatibility with old code */
- if ( v.spl_left[ v.spl_nleft-1 ] == InvalidOffsetNumber )
- v.spl_left[ v.spl_nleft-1 ] = (OffsetNumber)*len;
- if ( v.spl_right[ v.spl_nright-1 ] == InvalidOffsetNumber )
- v.spl_right[ v.spl_nright-1 ] = (OffsetNumber)*len;
-
- v.spl_lattr[0] = v.spl_ldatum;
+ /* compatibility with old code */
+ if (v.spl_left[v.spl_nleft - 1] == InvalidOffsetNumber)
+ v.spl_left[v.spl_nleft - 1] = (OffsetNumber) *len;
+ if (v.spl_right[v.spl_nright - 1] == InvalidOffsetNumber)
+ v.spl_right[v.spl_nright - 1] = (OffsetNumber) *len;
+
+ v.spl_lattr[0] = v.spl_ldatum;
v.spl_rattr[0] = v.spl_rdatum;
v.spl_lisnull[0] = false;
v.spl_risnull[0] = false;
- /* if index is multikey, then we must to try get smaller
- * bounding box for subkey(s)
- */
- if ( r->rd_att->natts > 1 ) {
- v.spl_idgrp = (int*) palloc( sizeof(int) * (*len + 1) );
- MemSet((void*)v.spl_idgrp, 0, sizeof(int) * (*len + 1) );
- v.spl_grpflag = (char*) palloc( sizeof(char) * (*len + 1) );
- MemSet((void*)v.spl_grpflag, 0, sizeof(char) * (*len + 1) );
- v.spl_ngrp = (int*) palloc( sizeof(int) * (*len + 1) );
+ /*
+ * if index is multikey, then we must to try get smaller bounding box
+ * for subkey(s)
+ */
+ if (r->rd_att->natts > 1)
+ {
+ v.spl_idgrp = (int *) palloc(sizeof(int) * (*len + 1));
+ MemSet((void *) v.spl_idgrp, 0, sizeof(int) * (*len + 1));
+ v.spl_grpflag = (char *) palloc(sizeof(char) * (*len + 1));
+ MemSet((void *) v.spl_grpflag, 0, sizeof(char) * (*len + 1));
+ v.spl_ngrp = (int *) palloc(sizeof(int) * (*len + 1));
- MaxGrpId = gistfindgroup( giststate, (GISTENTRY *) VARDATA(entryvec), &v );
+ MaxGrpId = gistfindgroup(giststate, (GISTENTRY *) VARDATA(entryvec), &v);
/* form union of sub keys for each page (l,p) */
- gistunionsubkey( r, giststate, itup, &v );
+ gistunionsubkey(r, giststate, itup, &v);
- /* if possible, we insert equivalent tuples
- * with control by penalty for a subkey(s)
- */
- if ( MaxGrpId > 1 )
- gistadjsubkey( r,itup,len, &v, giststate );
+ /*
+ * if possible, we insert equivalent tuples with control by
+ * penalty for a subkey(s)
+ */
+ if (MaxGrpId > 1)
+ gistadjsubkey(r, itup, len, &v, giststate);
- pfree( v.spl_idgrp );
- pfree( v.spl_grpflag );
- pfree( v.spl_ngrp );
- }
+ pfree(v.spl_idgrp);
+ pfree(v.spl_grpflag);
+ pfree(v.spl_ngrp);
+ }
/* clean up the entry vector: its keys need to be deleted, too */
for (i = 1; i <= *len; i++)
@@ -1231,11 +1329,11 @@ gistSplit(Relation r,
lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nleft);
rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nright);
- for(i=0; i<v.spl_nleft;i++)
- lvectup[i] = itup[ v.spl_left[i] - 1 ];
+ for (i = 0; i < v.spl_nleft; i++)
+ lvectup[i] = itup[v.spl_left[i] - 1];
- for(i=0; i<v.spl_nright;i++)
- rvectup[i] = itup[ v.spl_right[i] - 1 ];
+ for (i = 0; i < v.spl_nright; i++)
+ rvectup[i] = itup[v.spl_right[i] - 1];
/* write on disk (may be need another split) */
@@ -1245,9 +1343,9 @@ gistSplit(Relation r,
newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
(res && rvectup[nlen - 1] == itup[*len - 1]) ? res : NULL);
ReleaseBuffer(rightbuf);
- for( j=1; j<r->rd_att->natts; j++ )
- if ( (!isAttByVal(giststate,j)) && !v.spl_risnull[j] )
- pfree( DatumGetPointer(v.spl_rattr[j]) );
+ for (j = 1; j < r->rd_att->natts; j++)
+ if ((!isAttByVal(giststate, j)) && !v.spl_risnull[j])
+ pfree(DatumGetPointer(v.spl_rattr[j]));
}
else
{
@@ -1261,7 +1359,7 @@ gistSplit(Relation r,
nlen = 1;
newtup = (IndexTuple *) palloc(sizeof(IndexTuple) * 1);
- newtup[0] = gistFormTuple( giststate, r, v.spl_rattr, v.spl_rattrsize, v.spl_risnull );
+ newtup[0] = gistFormTuple(giststate, r, v.spl_rattr, v.spl_rattrsize, v.spl_risnull);
ItemPointerSet(&(newtup[0]->t_tid), rbknum, 1);
}
@@ -1275,9 +1373,9 @@ gistSplit(Relation r,
(res && lvectup[llen - 1] == itup[*len - 1]) ? res : NULL);
ReleaseBuffer(leftbuf);
- for( j=1; j<r->rd_att->natts; j++ )
- if ( (!isAttByVal(giststate,j)) && !v.spl_lisnull[j] )
- pfree( DatumGetPointer(v.spl_lattr[j]) );
+ for (j = 1; j < r->rd_att->natts; j++)
+ if ((!isAttByVal(giststate, j)) && !v.spl_lisnull[j])
+ pfree(DatumGetPointer(v.spl_lattr[j]));
newtup = gistjoinvector(newtup, &nlen, lntup, llen);
pfree(lntup);
@@ -1297,7 +1395,7 @@ gistSplit(Relation r,
nlen += 1;
newtup = (IndexTuple *) repalloc((void *) newtup, sizeof(IndexTuple) * nlen);
- newtup[nlen - 1] = gistFormTuple( giststate, r, v.spl_lattr, v.spl_lattrsize, v.spl_lisnull );
+ newtup[nlen - 1] = gistFormTuple(giststate, r, v.spl_lattr, v.spl_lattrsize, v.spl_lisnull);
ItemPointerSet(&(newtup[nlen - 1]->t_tid), lbknum, 1);
}
@@ -1359,47 +1457,56 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
Datum datum;
float usize;
OffsetNumber which;
- float sum_grow, which_grow[INDEX_MAX_KEYS];
+ float sum_grow,
+ which_grow[INDEX_MAX_KEYS];
GISTENTRY entry,
identry[INDEX_MAX_KEYS];
- bool IsNull, decompvec[INDEX_MAX_KEYS], isnull[INDEX_MAX_KEYS];
- int j;
+ bool IsNull,
+ decompvec[INDEX_MAX_KEYS],
+ isnull[INDEX_MAX_KEYS];
+ int j;
maxoff = PageGetMaxOffsetNumber(p);
*which_grow = -1.0;
which = -1;
- sum_grow=1;
- gistDeCompressAtt( giststate, r,
- it, (Page) NULL, (OffsetNumber) 0,
- identry, decompvec, isnull );
+ sum_grow = 1;
+ gistDeCompressAtt(giststate, r,
+ it, (Page) NULL, (OffsetNumber) 0,
+ identry, decompvec, isnull);
for (i = FirstOffsetNumber; i <= maxoff && sum_grow; i = OffsetNumberNext(i))
{
- IndexTuple itup = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
- sum_grow=0;
- for (j=0; j<r->rd_att->natts; j++) {
- datum = index_getattr(itup, j+1, giststate->tupdesc, &IsNull);
- gistdentryinit(giststate, j, &entry, datum, r, p, i, ATTSIZE( datum, giststate->tupdesc, j+1, IsNull ), FALSE, IsNull);
- gistpenalty( giststate, j, &entry, IsNull, &identry[j], isnull[j], &usize);
-
- if ( (!isAttByVal(giststate,j)) && entry.key != datum)
+ IndexTuple itup = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
+
+ sum_grow = 0;
+ for (j = 0; j < r->rd_att->natts; j++)
+ {
+ datum = index_getattr(itup, j + 1, giststate->tupdesc, &IsNull);
+ gistdentryinit(giststate, j, &entry, datum, r, p, i, ATTSIZE(datum, giststate->tupdesc, j + 1, IsNull), FALSE, IsNull);
+ gistpenalty(giststate, j, &entry, IsNull, &identry[j], isnull[j], &usize);
+
+ if ((!isAttByVal(giststate, j)) && entry.key != datum)
pfree(DatumGetPointer(entry.key));
- if ( which_grow[j]<0 || usize < which_grow[j] ) {
+ if (which_grow[j] < 0 || usize < which_grow[j])
+ {
which = i;
which_grow[j] = usize;
- if ( j<r->rd_att->natts-1 && i==FirstOffsetNumber ) which_grow[j+1]=-1;
- sum_grow += which_grow[j];
- } else if ( which_grow[j] == usize ) {
+ if (j < r->rd_att->natts - 1 && i == FirstOffsetNumber)
+ which_grow[j + 1] = -1;
+ sum_grow += which_grow[j];
+ }
+ else if (which_grow[j] == usize)
sum_grow += usize;
- } else {
- sum_grow=1;
+ else
+ {
+ sum_grow = 1;
break;
}
}
}
- gistFreeAtt( r, identry, decompvec );
+ gistFreeAtt(r, identry, decompvec);
return which;
}
@@ -1434,7 +1541,7 @@ gistdelete(Relation r, ItemPointer tid)
/*
* Since GIST is not marked "amconcurrent" in pg_am, caller should
- * have acquired exclusive lock on index relation. We need no locking
+ * have acquired exclusive lock on index relation. We need no locking
* here.
*/
@@ -1468,7 +1575,7 @@ gistbulkdelete(PG_FUNCTION_ARGS)
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
void *callback_state = (void *) PG_GETARG_POINTER(2);
IndexBulkDeleteResult *result;
- BlockNumber num_pages;
+ BlockNumber num_pages;
double tuples_removed;
double num_index_tuples;
RetrieveIndexResult res;
@@ -1479,7 +1586,7 @@ gistbulkdelete(PG_FUNCTION_ARGS)
/*
* Since GIST is not marked "amconcurrent" in pg_am, caller should
- * have acquired exclusive lock on index relation. We need no locking
+ * have acquired exclusive lock on index relation. We need no locking
* here.
*/
@@ -1542,7 +1649,7 @@ gistbulkdelete(PG_FUNCTION_ARGS)
void
initGISTstate(GISTSTATE *giststate, Relation index)
{
- int i;
+ int i;
if (index->rd_att->natts > INDEX_MAX_KEYS)
elog(ERROR, "initGISTstate: numberOfAttributes %d > %d",
@@ -1553,31 +1660,32 @@ initGISTstate(GISTSTATE *giststate, Relation index)
for (i = 0; i < index->rd_att->natts; i++)
{
fmgr_info_copy(&(giststate->consistentFn[i]),
- index_getprocinfo(index, i+1, GIST_CONSISTENT_PROC),
+ index_getprocinfo(index, i + 1, GIST_CONSISTENT_PROC),
CurrentMemoryContext);
fmgr_info_copy(&(giststate->unionFn[i]),
- index_getprocinfo(index, i+1, GIST_UNION_PROC),
+ index_getprocinfo(index, i + 1, GIST_UNION_PROC),
CurrentMemoryContext);
fmgr_info_copy(&(giststate->compressFn[i]),
- index_getprocinfo(index, i+1, GIST_COMPRESS_PROC),
+ index_getprocinfo(index, i + 1, GIST_COMPRESS_PROC),
CurrentMemoryContext);
fmgr_info_copy(&(giststate->decompressFn[i]),
- index_getprocinfo(index, i+1, GIST_DECOMPRESS_PROC),
+ index_getprocinfo(index, i + 1, GIST_DECOMPRESS_PROC),
CurrentMemoryContext);
fmgr_info_copy(&(giststate->penaltyFn[i]),
- index_getprocinfo(index, i+1, GIST_PENALTY_PROC),
+ index_getprocinfo(index, i + 1, GIST_PENALTY_PROC),
CurrentMemoryContext);
fmgr_info_copy(&(giststate->picksplitFn[i]),
- index_getprocinfo(index, i+1, GIST_PICKSPLIT_PROC),
+ index_getprocinfo(index, i + 1, GIST_PICKSPLIT_PROC),
CurrentMemoryContext);
fmgr_info_copy(&(giststate->equalFn[i]),
- index_getprocinfo(index, i+1, GIST_EQUAL_PROC),
+ index_getprocinfo(index, i + 1, GIST_EQUAL_PROC),
CurrentMemoryContext);
}
}
void
-freeGISTstate(GISTSTATE *giststate) {
+freeGISTstate(GISTSTATE *giststate)
+{
/* no work */
}
@@ -1592,13 +1700,14 @@ freeGISTstate(GISTSTATE *giststate) {
static IndexTuple
gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
{
- bool IsNull;
- Datum datum = index_getattr(t, 1, r->rd_att, &IsNull);
+ bool IsNull;
+ Datum datum = index_getattr(t, 1, r->rd_att, &IsNull);
/*
* If new entry fits in index tuple, copy it in. To avoid worrying
- * about null-value bitmask, pass it off to the general index_formtuple
- * routine if either the previous or new value is NULL.
+ * about null-value bitmask, pass it off to the general
+ * index_formtuple routine if either the previous or new value is
+ * NULL.
*/
if (!IsNull && DatumGetPointer(entry.key) != NULL &&
(Size) entry.bytes <= ATTSIZE(datum, r, 1, IsNull))
@@ -1638,7 +1747,7 @@ gistdentryinit(GISTSTATE *giststate, int nkey, GISTENTRY *e,
Datum k, Relation r, Page pg, OffsetNumber o,
int b, bool l, bool isNull)
{
- if ( b && ! isNull )
+ if (b && !isNull)
{
GISTENTRY *dep;
@@ -1655,9 +1764,7 @@ gistdentryinit(GISTSTATE *giststate, int nkey, GISTENTRY *e,
}
}
else
- {
gistentryinit(*e, (Datum) 0, r, pg, o, 0, l);
- }
}
@@ -1686,92 +1793,103 @@ gistcentryinit(GISTSTATE *giststate, int nkey,
}
}
else
- {
gistentryinit(*e, (Datum) 0, r, pg, o, 0, l);
- }
}
static IndexTuple
-gistFormTuple( GISTSTATE *giststate, Relation r,
- Datum attdata[], int datumsize[], bool isnull[] )
+gistFormTuple(GISTSTATE *giststate, Relation r,
+ Datum attdata[], int datumsize[], bool isnull[])
{
IndexTuple tup;
- char isnullchar[INDEX_MAX_KEYS];
- bool whatfree[INDEX_MAX_KEYS];
- GISTENTRY centry[INDEX_MAX_KEYS];
- Datum compatt[INDEX_MAX_KEYS];
- int j;
-
- for (j = 0; j < r->rd_att->natts; j++) {
- if ( isnull[j] ) {
+ char isnullchar[INDEX_MAX_KEYS];
+ bool whatfree[INDEX_MAX_KEYS];
+ GISTENTRY centry[INDEX_MAX_KEYS];
+ Datum compatt[INDEX_MAX_KEYS];
+ int j;
+
+ for (j = 0; j < r->rd_att->natts; j++)
+ {
+ if (isnull[j])
+ {
isnullchar[j] = 'n';
compatt[j] = (Datum) 0;
whatfree[j] = FALSE;
- } else {
+ }
+ else
+ {
gistcentryinit(giststate, j, &centry[j], attdata[j],
- (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
- datumsize[j], FALSE, FALSE);
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ datumsize[j], FALSE, FALSE);
isnullchar[j] = ' ';
compatt[j] = centry[j].key;
- if ( !isAttByVal(giststate,j) ) {
+ if (!isAttByVal(giststate, j))
+ {
whatfree[j] = TRUE;
- if ( centry[j].key != attdata[j] )
+ if (centry[j].key != attdata[j])
pfree(DatumGetPointer(attdata[j]));
- } else
+ }
+ else
whatfree[j] = FALSE;
}
}
tup = (IndexTuple) index_formtuple(giststate->tupdesc, compatt, isnullchar);
for (j = 0; j < r->rd_att->natts; j++)
- if ( whatfree[j] ) pfree(DatumGetPointer(compatt[j]));
+ if (whatfree[j])
+ pfree(DatumGetPointer(compatt[j]));
- return tup;
-}
+ return tup;
+}
static void
-gistDeCompressAtt( GISTSTATE *giststate, Relation r, IndexTuple tuple, Page p,
- OffsetNumber o, GISTENTRY attdata[], bool decompvec[], bool isnull[] ) {
- int i;
- Datum datum;
+gistDeCompressAtt(GISTSTATE *giststate, Relation r, IndexTuple tuple, Page p,
+ OffsetNumber o, GISTENTRY attdata[], bool decompvec[], bool isnull[])
+{
+ int i;
+ Datum datum;
- for(i=0; i < r->rd_att->natts; i++ ) {
- datum = index_getattr(tuple, i+1, giststate->tupdesc, &isnull[i]);
+ for (i = 0; i < r->rd_att->natts; i++)
+ {
+ datum = index_getattr(tuple, i + 1, giststate->tupdesc, &isnull[i]);
gistdentryinit(giststate, i, &attdata[i],
- datum, r, p, o,
- ATTSIZE( datum, giststate->tupdesc, i+1, isnull[i] ), FALSE, isnull[i]);
- if ( isAttByVal(giststate,i) )
+ datum, r, p, o,
+ ATTSIZE(datum, giststate->tupdesc, i + 1, isnull[i]), FALSE, isnull[i]);
+ if (isAttByVal(giststate, i))
decompvec[i] = FALSE;
- else {
- if (attdata[i].key == datum || isnull[i] )
+ else
+ {
+ if (attdata[i].key == datum || isnull[i])
decompvec[i] = FALSE;
else
decompvec[i] = TRUE;
}
}
-}
+}
static void
-gistFreeAtt( Relation r, GISTENTRY attdata[], bool decompvec[] ) {
- int i;
- for(i=0; i < r->rd_att->natts; i++ )
- if ( decompvec[i] )
- pfree( DatumGetPointer(attdata[i].key) );
+gistFreeAtt(Relation r, GISTENTRY attdata[], bool decompvec[])
+{
+ int i;
+
+ for (i = 0; i < r->rd_att->natts; i++)
+ if (decompvec[i])
+ pfree(DatumGetPointer(attdata[i].key));
}
static void
-gistpenalty( GISTSTATE *giststate, int attno,
- GISTENTRY *key1, bool isNull1,
- GISTENTRY *key2, bool isNull2, float *penalty ){
- if ( giststate->penaltyFn[attno].fn_strict && ( isNull1 || isNull2 ) )
- *penalty=0.0;
- else
+gistpenalty(GISTSTATE *giststate, int attno,
+ GISTENTRY *key1, bool isNull1,
+ GISTENTRY *key2, bool isNull2, float *penalty)
+{
+ if (giststate->penaltyFn[attno].fn_strict && (isNull1 || isNull2))
+ *penalty = 0.0;
+ else
FunctionCall3(&giststate->penaltyFn[attno],
- PointerGetDatum(key1),
- PointerGetDatum(key2),
- PointerGetDatum(penalty));
+ PointerGetDatum(key1),
+ PointerGetDatum(key2),
+ PointerGetDatum(penalty));
}
-
+
#ifdef GISTDEBUG
static void
gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
@@ -1816,7 +1934,6 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
ReleaseBuffer(buffer);
pfree(pred);
}
-
#endif /* defined GISTDEBUG */
void
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index be2d7498453..ed92258e624 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gistget.c,v 1.30 2001/08/22 18:24:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gistget.c,v 1.31 2001/10/25 05:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -251,7 +251,7 @@ gistindex_keytest(IndexTuple tuple,
if (key[0].sk_flags & SK_ISNULL)
return false;
*/
- gistdentryinit(giststate, key[0].sk_attno-1, &de,
+ gistdentryinit(giststate, key[0].sk_attno - 1, &de,
datum, r, p, offset,
IndexTupleSize(tuple) - sizeof(IndexTupleData),
FALSE, isNull);
@@ -271,9 +271,9 @@ gistindex_keytest(IndexTuple tuple,
ObjectIdGetDatum(key[0].sk_procedure));
}
- if ( de.key != datum && ! isAttByVal( giststate, key[0].sk_attno-1 ) )
- if ( DatumGetPointer(de.key) != NULL )
- pfree( DatumGetPointer(de.key) );
+ if (de.key != datum && !isAttByVal(giststate, key[0].sk_attno - 1))
+ if (DatumGetPointer(de.key) != NULL)
+ pfree(DatumGetPointer(de.key));
if (DatumGetBool(test) == !!(key[0].sk_flags & SK_NEGATE))
return false;
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index 2b65321b038..8623b8f643e 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.39 2001/08/22 18:24:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.40 2001/10/25 05:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -112,7 +112,7 @@ gistrescan(PG_FUNCTION_ARGS)
s->keyData[i].sk_procedure
= RelationGetGISTStrategy(s->relation, s->keyData[i].sk_attno,
s->keyData[i].sk_procedure);
- s->keyData[i].sk_func = p->giststate->consistentFn[s->keyData[i].sk_attno-1];
+ s->keyData[i].sk_func = p->giststate->consistentFn[s->keyData[i].sk_attno - 1];
}
}
else
@@ -137,13 +137,13 @@ gistrescan(PG_FUNCTION_ARGS)
/*----------
* s->keyData[i].sk_procedure =
- * index_getprocid(s->relation, 1, GIST_CONSISTENT_PROC);
+ * index_getprocid(s->relation, 1, GIST_CONSISTENT_PROC);
*----------
*/
s->keyData[i].sk_procedure
= RelationGetGISTStrategy(s->relation, s->keyData[i].sk_attno,
s->keyData[i].sk_procedure);
- s->keyData[i].sk_func = p->giststate->consistentFn[s->keyData[i].sk_attno-1];
+ s->keyData[i].sk_func = p->giststate->consistentFn[s->keyData[i].sk_attno - 1];
}
}
@@ -234,8 +234,8 @@ gistendscan(PG_FUNCTION_ARGS)
{
gistfreestack(p->s_stack);
gistfreestack(p->s_markstk);
- if ( p->giststate != NULL )
- freeGISTstate( p->giststate );
+ if (p->giststate != NULL)
+ freeGISTstate(p->giststate);
pfree(s->opaque);
}
@@ -383,7 +383,6 @@ adjustiptr(IndexScanDesc s,
}
else
{
-
/*
* remember that we're before the current
* tuple
diff --git a/src/backend/access/gist/giststrat.c b/src/backend/access/gist/giststrat.c
index e0bae48a3c4..db38191d6a0 100644
--- a/src/backend/access/gist/giststrat.c
+++ b/src/backend/access/gist/giststrat.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/Attic/giststrat.c,v 1.17 2001/05/30 19:53:40 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/Attic/giststrat.c,v 1.18 2001/10/25 05:49:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -121,5 +121,4 @@ RelationInvokeGISTStrategy(Relation r,
return (RelationInvokeStrategy(r, &GISTEvaluationData, attnum, s,
left, right));
}
-
#endif
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 9b0e6cf28ee..177f3648f40 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.52 2001/07/15 22:48:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.53 2001/10/25 05:49:20 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -37,11 +37,11 @@ typedef struct
} HashBuildState;
static void hashbuildCallback(Relation index,
- HeapTuple htup,
- Datum *attdata,
- char *nulls,
- bool tupleIsAlive,
- void *state);
+ HeapTuple htup,
+ Datum *attdata,
+ char *nulls,
+ bool tupleIsAlive,
+ void *state);
/*
@@ -80,7 +80,7 @@ hashbuild(PG_FUNCTION_ARGS)
/* do the heap scan */
reltuples = IndexBuildHeapScan(heap, index, indexInfo,
- hashbuildCallback, (void *) &buildstate);
+ hashbuildCallback, (void *) &buildstate);
/* all done */
BuildingHash = false;
@@ -121,7 +121,7 @@ hashbuildCallback(Relation index,
bool tupleIsAlive,
void *state)
{
- HashBuildState *buildstate = (HashBuildState *) state;
+ HashBuildState *buildstate = (HashBuildState *) state;
IndexTuple itup;
HashItem hitem;
InsertIndexResult res;
@@ -164,6 +164,7 @@ hashinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif
@@ -176,14 +177,13 @@ hashinsert(PG_FUNCTION_ARGS)
itup->t_tid = *ht_ctid;
/*
- * If the single index key is null, we don't insert it into the
- * index. Hash tables support scans on '='. Relational algebra
- * says that A = B returns null if either A or B is null. This
- * means that no qualification used in an index scan could ever
- * return true on a null attribute. It also means that indices
- * can't be used by ISNULL or NOTNULL scans, but that's an
- * artifact of the strategy map architecture chosen in 1986, not
- * of the way nulls are handled here.
+ * If the single index key is null, we don't insert it into the index.
+ * Hash tables support scans on '='. Relational algebra says that A =
+ * B returns null if either A or B is null. This means that no
+ * qualification used in an index scan could ever return true on a
+ * null attribute. It also means that indices can't be used by ISNULL
+ * or NOTNULL scans, but that's an artifact of the strategy map
+ * architecture chosen in 1986, not of the way nulls are handled here.
*/
if (IndexTupleHasNulls(itup))
{
@@ -262,7 +262,6 @@ hashrescan(PG_FUNCTION_ARGS)
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
bool fromEnd = PG_GETARG_BOOL(1);
-
#endif
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
@@ -412,7 +411,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
void *callback_state = (void *) PG_GETARG_POINTER(2);
IndexBulkDeleteResult *result;
- BlockNumber num_pages;
+ BlockNumber num_pages;
double tuples_removed;
double num_index_tuples;
RetrieveIndexResult res;
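
The reflowed hashinsert comment above carries the whole argument for discarding NULL keys: hash indexes only support '=' scans, and under SQL's three-valued logic A = B is unknown (not true) whenever either side is NULL, so a stored NULL entry could never satisfy any qual. A self-contained sketch of that logic, purely illustrative:

    #include <stdio.h>

    typedef enum { TRI_FALSE, TRI_TRUE, TRI_UNKNOWN } TriBool;

    /* SQL-style equality: NULL compared with anything is unknown. */
    static TriBool
    sql_equals(const int *a, const int *b)
    {
        if (a == NULL || b == NULL)
            return TRI_UNKNOWN;
        return (*a == *b) ? TRI_TRUE : TRI_FALSE;
    }

    int
    main(void)
    {
        int x = 42;

        /* Only TRI_TRUE qualifies a tuple, so a NULL key can never
         * match and there is no point storing it in a '='-only index. */
        printf("%d\n", sql_equals(&x, &x) == TRI_TRUE);    /* 1 */
        printf("%d\n", sql_equals(&x, NULL) == TRI_TRUE);  /* 0 */
        return 0;
    }
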
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 5439dce2148..2f99e7426dc 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.22 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.23 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -133,13 +133,11 @@ _hash_insertonpg(Relation rel,
while (PageGetFreeSpace(page) < itemsz)
{
-
/*
* no space on this page; check for an overflow page
*/
if (BlockNumberIsValid(pageopaque->hasho_nextblkno))
{
-
/*
* ovfl page exists; go get it. if it doesn't have room,
* we'll find out next pass through the loop test above.
@@ -152,7 +150,6 @@ _hash_insertonpg(Relation rel,
}
else
{
-
/*
* we're at the end of the bucket chain and we haven't found a
* page with enough room. allocate a new overflow page.
@@ -184,7 +181,6 @@ _hash_insertonpg(Relation rel,
if (res != NULL)
{
-
/*
* Increment the number of keys in the table. We switch lock
* access type just for a moment to allow greater accessibility to
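
_hash_insertonpg's loop, whose comments the hunks above re-wrap, walks the bucket's overflow chain looking for a page with room and appends a fresh overflow page when the chain runs out. A toy model of that walk, with pages reduced to a free-space counter and a next pointer (no locking, no metapage bookkeeping):

    #include <stdlib.h>

    typedef struct Page
    {
        int          free_space;
        struct Page *next;      /* overflow chain; NULL at end of bucket */
    } Page;

    /* Find, or create, a page in the bucket chain with room for itemsz. */
    static Page *
    find_page_with_room(Page *page, int itemsz, int page_capacity)
    {
        while (page->free_space < itemsz)
        {
            if (page->next != NULL)
                page = page->next;  /* ovfl page exists; go try it */
            else
            {
                /* end of chain: allocate a new overflow page */
                Page *ovfl = malloc(sizeof(Page));

                ovfl->free_space = page_capacity;
                ovfl->next = NULL;
                page->next = ovfl;
                page = ovfl;
            }
        }
        return page;
    }

    int
    main(void)
    {
        Page  bucket = {0, NULL};   /* a completely full primary page */
        Page *p = find_page_with_room(&bucket, 16, 8192);

        p->free_space -= 16;        /* the insert would go here */
        return 0;
    }
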
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index c9fb065dbd2..e41c1bd0a3e 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.30 2001/07/15 22:48:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.31 2001/10/25 05:49:21 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -208,7 +208,6 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
}
else
{
-
/*
* Free_bit addresses the last used bit. Bump it to address the
* first available bit.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index b8c520e3c0d..0fff5a11e6f 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.32 2001/07/15 22:48:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.33 2001/10/25 05:49:21 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -50,7 +50,7 @@ static void _hash_splitpage(Relation rel, Buffer metabuf, Bucket obucket, Bucket
* system catalogs anyway.
*
* Note that our page locks are actual lockmanager locks, not buffer
- * locks (as are used by btree, for example). This is a good idea because
+ * locks (as are used by btree, for example). This is a good idea because
* the algorithms are not deadlock-free, and we'd better be able to detect
* and recover from deadlocks.
*
@@ -325,7 +325,7 @@ _hash_setpagelock(Relation rel,
{
switch (access)
{
- case HASH_WRITE:
+ case HASH_WRITE:
LockPage(rel, blkno, ExclusiveLock);
break;
case HASH_READ:
@@ -349,7 +349,7 @@ _hash_unsetpagelock(Relation rel,
{
switch (access)
{
- case HASH_WRITE:
+ case HASH_WRITE:
UnlockPage(rel, blkno, ExclusiveLock);
break;
case HASH_READ:
@@ -369,7 +369,7 @@ _hash_unsetpagelock(Relation rel,
* It is safe to delete an item after acquiring a regular WRITE lock on
* the page, because no other backend can hold a READ lock on the page,
* and that means no other backend currently has an indexscan stopped on
- * any item of the item being deleted. Our own backend might have such
+ * any item of the item being deleted. Our own backend might have such
* an indexscan (in fact *will*, since that's how VACUUM found the item
* in the first place), but _hash_adjscans will fix the scan position.
*/
@@ -532,7 +532,6 @@ _hash_splitpage(Relation rel,
_hash_relbuf(rel, obuf, HASH_WRITE);
if (!BlockNumberIsValid(oblkno))
{
-
/*
* the old bucket is completely empty; of course, the new
* bucket will be as well, but since it's a base bucket page
@@ -559,7 +558,6 @@ _hash_splitpage(Relation rel,
omaxoffnum = PageGetMaxOffsetNumber(opage);
for (;;)
{
-
/*
* at each iteration through this loop, each of these variables
* should be up-to-date: obuf opage oopaque ooffnum omaxoffnum
@@ -572,7 +570,6 @@ _hash_splitpage(Relation rel,
oblkno = oopaque->hasho_nextblkno;
if (BlockNumberIsValid(oblkno))
{
-
/*
* we ran out of tuples on this particular page, but we
* have more overflow pages; re-init values.
@@ -594,7 +591,6 @@ _hash_splitpage(Relation rel,
}
else
{
-
/*
* we're at the end of the bucket chain, so now we're
* really done with everything. before quitting, call
@@ -618,7 +614,6 @@ _hash_splitpage(Relation rel,
if (bucket == nbucket)
{
-
/*
* insert the tuple into the new bucket. if it doesn't fit on
* the current page in the new bucket, we must allocate a new
@@ -695,7 +690,6 @@ _hash_splitpage(Relation rel,
}
else
{
-
/*
* the tuple stays on this page. we didn't move anything, so
* we didn't delete anything and therefore we don't have to
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 27ca0301181..6ea643dcaf5 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.26 2001/03/23 04:49:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.27 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,7 +36,6 @@ _hash_search(Relation rel,
if (scankey == (ScanKey) NULL ||
(keyDatum = scankey[0].sk_argument) == (Datum) NULL)
{
-
/*
* If the scankey argument is NULL, all tuples will satisfy the
* scan so we start the scan at the first bucket (bucket 0).
diff --git a/src/backend/access/hash/hashstrat.c b/src/backend/access/hash/hashstrat.c
index fc5df2372d8..a765c2e2b19 100644
--- a/src/backend/access/hash/hashstrat.c
+++ b/src/backend/access/hash/hashstrat.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/Attic/hashstrat.c,v 1.18 2001/05/30 19:53:40 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/Attic/hashstrat.c,v 1.19 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,7 +46,6 @@ static StrategyEvaluationData HTEvaluationData = {
(StrategyTransformMap) HTNegateCommute,
HTEvaluationExpressions
};
-
#endif
/* ----------------------------------------------------------------
@@ -68,7 +67,6 @@ _hash_getstrat(Relation rel,
return strat;
}
-
#endif
#ifdef NOT_USED
@@ -82,5 +80,4 @@ _hash_invokestrat(Relation rel,
return (RelationInvokeStrategy(rel, &HTEvaluationData, attno, strat,
left, right));
}
-
#endif
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index c64a19faa61..f73ca50285b 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.125 2001/08/23 23:06:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.126 2001/10/25 05:49:21 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -116,8 +116,8 @@ heapgettup(Relation relation,
{
ItemId lpp;
Page dp;
- BlockNumber page;
- BlockNumber pages;
+ BlockNumber page;
+ BlockNumber pages;
int lines;
OffsetNumber lineoff;
int linesleft;
@@ -350,7 +350,7 @@ heapgettup(Relation relation,
/*
* return NULL if we've exhausted all the pages
*/
- if ((dir < 0) ? (page == 0) : (page+1 >= pages))
+ if ((dir < 0) ? (page == 0) : (page + 1 >= pages))
{
if (BufferIsValid(*buffer))
ReleaseBuffer(*buffer);
@@ -429,9 +429,8 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
(
(Datum) NULL
)
- );
+ );
}
-
#endif /* defined(DISABLE_COMPLEX_MACRO) */
@@ -1045,12 +1044,13 @@ heap_insert(Relation relation, HeapTuple tup)
if (relation->rd_rel->relhasoids)
{
/*
- * If the object id of this tuple has already been assigned, trust the
- * caller. There are a couple of ways this can happen. At initial db
- * creation, the backend program sets oids for tuples. When we define
- * an index, we set the oid. Finally, in the future, we may allow
- * users to set their own object ids in order to support a persistent
- * object store (objects need to contain pointers to one another).
+ * If the object id of this tuple has already been assigned, trust
+ * the caller. There are a couple of ways this can happen. At
+ * initial db creation, the backend program sets oids for tuples.
+ * When we define an index, we set the oid. Finally, in the
+ * future, we may allow users to set their own object ids in order
+ * to support a persistent object store (objects need to contain
+ * pointers to one another).
*/
if (!OidIsValid(tup->t_data->t_oid))
tup->t_data->t_oid = newoid();
@@ -1478,21 +1478,22 @@ l2:
}
/*
- * Now, do we need a new page for the tuple, or not? This is a bit
- * tricky since someone else could have added tuples to the page
- * while we weren't looking. We have to recheck the available space
- * after reacquiring the buffer lock. But don't bother to do that
- * if the former amount of free space is still not enough; it's
- * unlikely there's more free now than before.
+ * Now, do we need a new page for the tuple, or not? This is a
+ * bit tricky since someone else could have added tuples to the
+ * page while we weren't looking. We have to recheck the
+ * available space after reacquiring the buffer lock. But don't
+ * bother to do that if the former amount of free space is still
+ * not enough; it's unlikely there's more free now than before.
*
* What's more, if we need to get a new page, we will need to acquire
- * buffer locks on both old and new pages. To avoid deadlock against
- * some other backend trying to get the same two locks in the other
- * order, we must be consistent about the order we get the locks in.
- * We use the rule "lock the lower-numbered page of the relation
- * first". To implement this, we must do RelationGetBufferForTuple
- * while not holding the lock on the old page, and we must rely on it
- * to get the locks on both pages in the correct order.
+ * buffer locks on both old and new pages. To avoid deadlock
+ * against some other backend trying to get the same two locks in
+ * the other order, we must be consistent about the order we get
+ * the locks in. We use the rule "lock the lower-numbered page of
+ * the relation first". To implement this, we must do
+ * RelationGetBufferForTuple while not holding the lock on the old
+ * page, and we must rely on it to get the locks on both pages in
+ * the correct order.
*/
if (newtupsize > pagefree)
{
@@ -1510,8 +1511,8 @@ l2:
{
/*
* Rats, it doesn't fit anymore. We must now unlock and
- * relock to avoid deadlock. Fortunately, this path should
- * seldom be taken.
+ * relock to avoid deadlock. Fortunately, this path
+ * should seldom be taken.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
newbuf = RelationGetBufferForTuple(relation, newtup->t_len,
@@ -1534,9 +1535,9 @@ l2:
pgstat_count_heap_update(&relation->pgstat_info);
/*
- * At this point newbuf and buffer are both pinned and locked,
- * and newbuf has enough space for the new tuple. If they are
- * the same buffer, only one pin is held.
+ * At this point newbuf and buffer are both pinned and locked, and
+ * newbuf has enough space for the new tuple. If they are the same
+ * buffer, only one pin is held.
*/
/* NO ELOG(ERROR) from here till changes are logged */
@@ -1865,12 +1866,14 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
* Note: xlhdr is declared to have adequate size and correct alignment
* for an xl_heap_header. However the two tids, if present at all,
* will be packed in with no wasted space after the xl_heap_header;
- * they aren't necessarily aligned as implied by this struct declaration.
+ * they aren't necessarily aligned as implied by this struct
+ * declaration.
*/
- struct {
- xl_heap_header hdr;
- TransactionId tid1;
- TransactionId tid2;
+ struct
+ {
+ xl_heap_header hdr;
+ TransactionId tid1;
+ TransactionId tid2;
} xlhdr;
int hsize = SizeOfHeapHeader;
xl_heap_update xlrec;
@@ -1972,7 +1975,7 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfHeapClean)
{
- OffsetNumber unbuf[BLCKSZ/sizeof(OffsetNumber)];
+ OffsetNumber unbuf[BLCKSZ / sizeof(OffsetNumber)];
OffsetNumber *unused = unbuf;
char *unend;
ItemId lp;
@@ -2084,9 +2087,10 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (redo)
{
- struct {
+ struct
+ {
HeapTupleHeaderData hdr;
- char data[MaxTupleSize];
+ char data[MaxTupleSize];
} tbuf;
HeapTupleHeader htup;
xl_heap_header xlhdr;
@@ -2251,9 +2255,10 @@ newsame:;
if (redo)
{
- struct {
+ struct
+ {
HeapTupleHeaderData hdr;
- char data[MaxTupleSize];
+ char data[MaxTupleSize];
} tbuf;
xl_heap_header xlhdr;
int hsize;
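
The re-wrapped heap_update comment above states the deadlock-avoidance rule precisely: whenever two pages must be locked, take the lower-numbered one first, so that no two backends can ever wait on each other in opposite orders. The rule is generic; here is a minimal pthread illustration of it (not PostgreSQL code; compile with -pthread):

    #include <pthread.h>

    #define NPAGES 16

    static pthread_mutex_t page_lock[NPAGES];

    /*
     * Lock two pages in canonical order: lower page number first.
     * If every caller follows this rule, a waiter cycle (A holds
     * page 3 and wants page 7 while B holds 7 and wants 3) cannot
     * arise.  The hi != lo check mirrors the same-buffer case, where
     * only one lock is taken.
     */
    static void
    lock_page_pair(int a, int b)
    {
        int lo = a < b ? a : b;
        int hi = a < b ? b : a;

        pthread_mutex_lock(&page_lock[lo]);
        if (hi != lo)
            pthread_mutex_lock(&page_lock[hi]);
    }

    static void
    unlock_page_pair(int a, int b)
    {
        /* unlock order is irrelevant to deadlock avoidance */
        pthread_mutex_unlock(&page_lock[a]);
        if (a != b)
            pthread_mutex_unlock(&page_lock[b]);
    }

    int
    main(void)
    {
        for (int i = 0; i < NPAGES; i++)
            pthread_mutex_init(&page_lock[i], NULL);

        lock_page_pair(7, 3);
        unlock_page_pair(7, 3);
        return 0;
    }
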
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 81a559b37e4..44ecb3c8c71 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Id: hio.c,v 1.42 2001/07/13 22:52:58 tgl Exp $
+ * $Id: hio.c,v 1.43 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,7 +83,7 @@ RelationPutHeapTuple(Relation relation,
* NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
* same buffer we select for insertion of the new tuple (this could only
* happen if space is freed in that page after heap_update finds there's not
- * enough there). In that case, the page will be pinned and locked only once.
+ * enough there). In that case, the page will be pinned and locked only once.
*
* Note that we use LockPage(rel, 0) to lock relation for extension.
* We can do this as long as in all other places we use page-level locking
@@ -115,17 +115,19 @@ RelationGetBufferForTuple(Relation relation, Size len,
if (otherBuffer != InvalidBuffer)
otherBlock = BufferGetBlockNumber(otherBuffer);
else
- otherBlock = InvalidBlockNumber; /* just to keep compiler quiet */
+ otherBlock = InvalidBlockNumber; /* just to keep compiler
+ * quiet */
/*
* We first try to put the tuple on the same page we last inserted a
* tuple on, as cached in the relcache entry. If that doesn't work,
- * we ask the shared Free Space Map to locate a suitable page. Since
+ * we ask the shared Free Space Map to locate a suitable page. Since
* the FSM's info might be out of date, we have to be prepared to loop
* around and retry multiple times. (To insure this isn't an infinite
- * loop, we must update the FSM with the correct amount of free space on
- * each page that proves not to be suitable.) If the FSM has no record of
- * a page with enough free space, we give up and extend the relation.
+ * loop, we must update the FSM with the correct amount of free space
+ * on each page that proves not to be suitable.) If the FSM has no
+ * record of a page with enough free space, we give up and extend the
+ * relation.
*/
targetBlock = relation->rd_targblock;
@@ -137,6 +139,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
* target.
*/
targetBlock = GetPageWithFreeSpace(&relation->rd_node, len);
+
/*
* If the FSM knows nothing of the rel, try the last page before
* we give up and extend. This avoids one-tuple-per-page syndrome
@@ -144,7 +147,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
*/
if (targetBlock == InvalidBlockNumber)
{
- BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
+ BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
if (nblocks > 0)
targetBlock = nblocks - 1;
@@ -154,9 +157,9 @@ RelationGetBufferForTuple(Relation relation, Size len,
while (targetBlock != InvalidBlockNumber)
{
/*
- * Read and exclusive-lock the target block, as well as the
- * other block if one was given, taking suitable care with
- * lock ordering and the possibility they are the same block.
+ * Read and exclusive-lock the target block, as well as the other
+ * block if one was given, taking suitable care with lock ordering
+ * and the possibility they are the same block.
*/
if (otherBuffer == InvalidBuffer)
{
@@ -184,9 +187,10 @@ RelationGetBufferForTuple(Relation relation, Size len,
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
}
+
/*
- * Now we can check to see if there's enough free space here.
- * If so, we're done.
+ * Now we can check to see if there's enough free space here. If
+ * so, we're done.
*/
pageHeader = (Page) BufferGetPage(buffer);
pageFreeSpace = PageGetFreeSpace(pageHeader);
@@ -196,22 +200,22 @@ RelationGetBufferForTuple(Relation relation, Size len,
relation->rd_targblock = targetBlock;
return buffer;
}
+
/*
- * Not enough space, so we must give up our page locks and
- * pin (if any) and prepare to look elsewhere. We don't care
- * which order we unlock the two buffers in, so this can be
- * slightly simpler than the code above.
+ * Not enough space, so we must give up our page locks and pin (if
+ * any) and prepare to look elsewhere. We don't care which order
+ * we unlock the two buffers in, so this can be slightly simpler
+ * than the code above.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
if (otherBuffer == InvalidBuffer)
- {
ReleaseBuffer(buffer);
- }
else if (otherBlock != targetBlock)
{
LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
}
+
/*
* Update FSM as to condition of this page, and ask for another
* page to try.
@@ -225,9 +229,9 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Have to extend the relation.
*
- * We have to use a lock to ensure no one else is extending the
- * rel at the same time, else we will both try to initialize the
- * same new page.
+ * We have to use a lock to ensure no one else is extending the rel at
+ * the same time, else we will both try to initialize the same new
+ * page.
*/
if (!relation->rd_myxactonly)
LockPage(relation, 0, ExclusiveLock);
@@ -236,20 +240,21 @@ RelationGetBufferForTuple(Relation relation, Size len,
* XXX This does an lseek - rather expensive - but at the moment it is
* the only way to accurately determine how many blocks are in a
* relation. Is it worth keeping an accurate file length in shared
- * memory someplace, rather than relying on the kernel to do it for us?
+ * memory someplace, rather than relying on the kernel to do it for
+ * us?
*/
buffer = ReadBuffer(relation, P_NEW);
/*
- * Release the file-extension lock; it's now OK for someone else
- * to extend the relation some more.
+ * Release the file-extension lock; it's now OK for someone else to
+ * extend the relation some more.
*/
if (!relation->rd_myxactonly)
UnlockPage(relation, 0, ExclusiveLock);
/*
- * We can be certain that locking the otherBuffer first is OK,
- * since it must have a lower page number.
+ * We can be certain that locking the otherBuffer first is OK, since
+ * it must have a lower page number.
*/
if (otherBuffer != InvalidBuffer)
LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
@@ -273,7 +278,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
*
* XXX should we enter the new page into the free space map immediately,
* or just keep it for this backend's exclusive use in the short run
- * (until VACUUM sees it)? Seems to depend on whether you expect the
+ * (until VACUUM sees it)? Seems to depend on whether you expect the
* current backend to make more insertions or not, which is probably a
* good bet most of the time. So for now, don't add it to FSM yet.
*/
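
The hio.c comments above describe RelationGetBufferForTuple's retry loop: ask the FSM for a page, and if the page turns out not to fit the tuple, write its true free space back into the FSM before asking again; correcting each failed entry is what keeps the loop finite. A compact working model, with the FSM reduced to an array of possibly stale free-space figures (all numbers made up):

    #include <stdio.h>

    #define NBLOCKS        4
    #define INVALID_BLOCK (-1)

    static int fsm_avail[NBLOCKS]  = {100, 80, 60, 40};  /* possibly stale */
    static int real_avail[NBLOCKS] = {10, 80, 60, 40};   /* ground truth  */

    static int
    fsm_get_page_with_free_space(int needed)
    {
        for (int i = 0; i < NBLOCKS; i++)
            if (fsm_avail[i] >= needed)
                return i;
        return INVALID_BLOCK;
    }

    /*
     * Sketch of the retry loop: take the FSM's suggestion; if the page
     * doesn't really fit, fix its stale FSM entry and ask again.  If
     * the FSM runs dry, fall back to extending the relation.
     */
    static int
    get_block_for_tuple(int len)
    {
        int block = fsm_get_page_with_free_space(len);

        while (block != INVALID_BLOCK)
        {
            if (real_avail[block] >= len)
                return block;                     /* page really has room */

            fsm_avail[block] = real_avail[block]; /* correct stale info   */
            block = fsm_get_page_with_free_space(len);
        }
        return NBLOCKS;                           /* "extend the relation" */
    }

    int
    main(void)
    {
        printf("chose block %d\n", get_block_for_tuple(70));
        return 0;
    }

In the example, block 0 advertises 100 bytes but really has 10, so the first pass corrects the entry and the second pass settles on block 1.
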
diff --git a/src/backend/access/heap/stats.c b/src/backend/access/heap/stats.c
index 6dabf49e341..6f5dfbea141 100644
--- a/src/backend/access/heap/stats.c
+++ b/src/backend/access/heap/stats.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/stats.c,v 1.24 2001/03/22 06:16:07 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/stats.c,v 1.25 2001/10/25 05:49:21 momjian Exp $
*
* NOTES
* initam should be moved someplace else.
@@ -164,7 +164,6 @@ ResetHeapAccessStatistics()
time(&stats->local_reset_timestamp);
time(&stats->last_request_timestamp);
}
-
#endif
#ifdef NOT_USED
@@ -200,7 +199,6 @@ GetHeapAccessStatistics()
return stats;
}
-
#endif
#ifdef NOT_USED
@@ -211,7 +209,6 @@ GetHeapAccessStatistics()
void
PrintHeapAccessStatistics(HeapAccessStatistics stats)
{
-
/*
* return nothing if stats aren't valid
*/
@@ -302,7 +299,6 @@ PrintHeapAccessStatistics(HeapAccessStatistics stats)
printf("\n");
}
-
#endif
#ifdef NOT_USED
@@ -317,7 +313,6 @@ PrintAndFreeHeapAccessStatistics(HeapAccessStatistics stats)
if (stats != NULL)
pfree(stats);
}
-
#endif
/* ----------------------------------------------------------------
@@ -331,7 +326,6 @@ PrintAndFreeHeapAccessStatistics(HeapAccessStatistics stats)
void
initam(void)
{
-
/*
* initialize heap statistics.
*/
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index dd881ca5f01..5ddefde8c71 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.24 2001/08/10 18:57:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.25 2001/10/25 05:49:21 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -74,14 +74,13 @@ heap_tuple_toast_attrs(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* external storage (possibly still in compressed format).
* ----------
*/
-varattrib *
+varattrib *
heap_tuple_fetch_attr(varattrib *attr)
{
varattrib *result;
if (VARATT_IS_EXTERNAL(attr))
{
-
/*
* This is an external stored plain value
*/
@@ -89,7 +88,6 @@ heap_tuple_fetch_attr(varattrib *attr)
}
else
{
-
/*
* This is a plain value inside of the main tuple - why am I
* called?
@@ -108,7 +106,7 @@ heap_tuple_fetch_attr(varattrib *attr)
* or external storage.
* ----------
*/
-varattrib *
+varattrib *
heap_tuple_untoast_attr(varattrib *attr)
{
varattrib *result;
@@ -135,7 +133,6 @@ heap_tuple_untoast_attr(varattrib *attr)
}
else
{
-
/*
* This is an external stored plain value
*/
@@ -144,7 +141,6 @@ heap_tuple_untoast_attr(varattrib *attr)
}
else if (VARATT_IS_COMPRESSED(attr))
{
-
/*
* This is a compressed value inside of the main tuple
*/
@@ -181,8 +177,8 @@ toast_raw_datum_size(Datum value)
if (VARATT_IS_COMPRESSED(attr))
{
/*
- * va_rawsize shows the original data size, whether the datum
- * is external or not.
+ * va_rawsize shows the original data size, whether the datum is
+ * external or not.
*/
result = attr->va_content.va_compressed.va_rawsize + VARHDRSZ;
}
@@ -301,7 +297,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (oldtup != NULL)
{
-
/*
* For UPDATE get the old and new values of this attribute
*/
@@ -324,7 +319,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
old_value->va_content.va_external.va_toastrelid !=
new_value->va_content.va_external.va_toastrelid)
{
-
/*
* The old external store value isn't needed any more
* after the update
@@ -334,7 +328,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
-
/*
* This attribute isn't changed by this update so we
* reuse the original reference to the old value in
@@ -348,7 +341,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
-
/*
* For INSERT simply get the new value
*/
@@ -372,7 +364,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
*/
if (att[i]->attlen == -1)
{
-
/*
* If the table's attribute says PLAIN always, force it so.
*/
@@ -400,7 +391,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
-
/*
* Not a variable size attribute, plain storage always
*/
@@ -475,7 +465,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
-
/*
* incompressible data, ignore on subsequent compression
* passes
@@ -588,7 +577,6 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
}
else
{
-
/*
* incompressible data, ignore on subsequent compression
* passes
@@ -776,9 +764,10 @@ toast_save_datum(Relation rel, Datum value)
Datum t_values[3];
char t_nulls[3];
varattrib *result;
- struct {
- struct varlena hdr;
- char data[TOAST_MAX_CHUNK_SIZE];
+ struct
+ {
+ struct varlena hdr;
+ char data[TOAST_MAX_CHUNK_SIZE];
} chunk_data;
int32 chunk_size;
int32 chunk_seq = 0;
@@ -851,12 +840,12 @@ toast_save_datum(Relation rel, Datum value)
heap_insert(toastrel, toasttup);
/*
- * Create the index entry. We cheat a little here by not using
+ * Create the index entry. We cheat a little here by not using
* FormIndexDatum: this relies on the knowledge that the index
* columns are the same as the initial columns of the table.
*
- * Note also that there had better not be any user-created index
- * on the TOAST table, since we don't bother to update anything else.
+ * Note also that there had better not be any user-created index on
+ * the TOAST table, since we don't bother to update anything else.
*/
idxres = index_insert(toastidx, t_values, t_nulls,
&(toasttup->t_self),
@@ -916,8 +905,8 @@ toast_delete_datum(Relation rel, Datum value)
toastidx = index_open(toastrel->rd_rel->reltoastidxid);
/*
- * Setup a scan key to fetch from the index by va_valueid
- * (we don't particularly care whether we see them in sequence or not)
+ * Setup a scan key to fetch from the index by va_valueid (we don't
+ * particularly care whether we see them in sequence or not)
*/
ScanKeyEntryInitialize(&toastkey,
(bits16) 0,
@@ -1096,5 +1085,4 @@ toast_fetch_datum(varattrib *attr)
return result;
}
-
#endif /* TUPLE_TOASTER_ACTIVE */
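
toast_save_datum, whose chunk buffer is restyled above, slices an oversized value into TOAST_MAX_CHUNK_SIZE pieces and stores each as a (value id, sequence number, chunk) row that the index on those columns can later reassemble. A standalone sketch of just the slicing loop (CHUNK_SIZE and the printf stand in for the real constant and heap_insert):

    #include <stdio.h>
    #include <string.h>

    #define CHUNK_SIZE 8        /* stand-in for TOAST_MAX_CHUNK_SIZE */

    static void
    save_datum(int valueid, const char *data, size_t len)
    {
        char   chunk[CHUNK_SIZE];
        int    chunk_seq = 0;
        size_t done = 0;

        while (done < len)
        {
            size_t n = len - done < CHUNK_SIZE ? len - done : CHUNK_SIZE;

            memcpy(chunk, data + done, n);
            /* here PostgreSQL would heap_insert a (valueid, seq, chunk)
             * tuple into the TOAST table and make its index entry */
            printf("value %d chunk %d: %zu bytes\n", valueid, chunk_seq, n);
            chunk_seq++;
            done += n;
        }
    }

    int
    main(void)
    {
        const char *big = "a-value-too-wide-for-one-chunk";

        save_datum(1, big, strlen(big));
        return 0;
    }
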
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 1115fb828b2..b03690257c8 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.28 2001/06/22 19:16:21 wieck Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.29 2001/10/25 05:49:21 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -240,5 +240,4 @@ IndexScanRestorePosition(IndexScanDesc scan)
scan->flags = 0x0; /* XXX should have a symbolic name */
}
-
#endif
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 2a1d3294dd0..8e1d5b87332 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.53 2001/10/06 23:21:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.54 2001/10/25 05:49:21 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relationId
@@ -241,9 +241,9 @@ index_beginscan(Relation relation,
pgstat_initstats(&scan->xs_pgstat_info, relation);
/*
- * We want to look up the amgettuple procedure just once per scan,
- * not once per index_getnext call. So do it here and save
- * the fmgr info result in the scan descriptor.
+ * We want to look up the amgettuple procedure just once per scan, not
+ * once per index_getnext call. So do it here and save the fmgr info
+ * result in the scan descriptor.
*/
GET_SCAN_PROCEDURE(beginscan, amgettuple);
fmgr_info(procedure, &scan->fn_getnext);
@@ -342,8 +342,8 @@ index_getnext(IndexScanDesc scan,
pgstat_count_index_scan(&scan->xs_pgstat_info);
/*
- * have the am's gettuple proc do all the work.
- * index_beginscan already set up fn_getnext.
+ * have the am's gettuple proc do all the work. index_beginscan
+ * already set up fn_getnext.
*/
result = (RetrieveIndexResult)
DatumGetPointer(FunctionCall2(&scan->fn_getnext,
@@ -378,8 +378,8 @@ index_bulk_delete(Relation relation,
result = (IndexBulkDeleteResult *)
DatumGetPointer(OidFunctionCall3(procedure,
PointerGetDatum(relation),
- PointerGetDatum((Pointer) callback),
- PointerGetDatum(callback_state)));
+ PointerGetDatum((Pointer) callback),
+ PointerGetDatum(callback_state)));
return result;
}
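
The indexam.c hunk above re-wraps the comment explaining why the amgettuple procedure is looked up just once per scan: the fmgr lookup is paid in index_beginscan and cached in the scan descriptor, leaving index_getnext with a bare indirect call. A sketch of the same caching pattern with an ordinary function pointer (types and names invented for the example):

    #include <stdio.h>

    typedef int (*GetNextProc) (int state);

    typedef struct
    {
        int         position;
        GetNextProc fn_getnext;     /* looked up once, reused per call */
    } ScanDesc;

    static int
    am_gettuple(int pos)
    {
        return pos * 10;            /* stand-in for the AM's gettuple proc */
    }

    /* The (pretend) expensive lookup happens once, in beginscan... */
    static void
    begin_scan(ScanDesc *scan)
    {
        scan->position = 0;
        scan->fn_getnext = am_gettuple;
    }

    /* ...so getnext is a plain indirect call with no per-call lookup. */
    static int
    get_next(ScanDesc *scan)
    {
        return scan->fn_getnext(++scan->position);
    }

    int
    main(void)
    {
        ScanDesc scan;

        begin_scan(&scan);
        printf("%d\n", get_next(&scan));    /* 10 */
        printf("%d\n", get_next(&scan));    /* 20 */
        return 0;
    }
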
diff --git a/src/backend/access/index/istrat.c b/src/backend/access/index/istrat.c
index 568581fc18a..88d1757e4cd 100644
--- a/src/backend/access/index/istrat.c
+++ b/src/backend/access/index/istrat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.53 2001/10/06 23:21:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.54 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -120,9 +120,9 @@ StrategyOperatorIsValid(StrategyOperator operator,
StrategyNumber maxStrategy)
{
return (bool)
- (PointerIsValid(operator) &&
- StrategyNumberIsInBounds(operator->strategy, maxStrategy) &&
- !(operator->flags & ~(SK_NEGATE | SK_COMMUTE)));
+ (PointerIsValid(operator) &&
+ StrategyNumberIsInBounds(operator->strategy, maxStrategy) &&
+ !(operator->flags & ~(SK_NEGATE | SK_COMMUTE)));
}
/* ----------------
@@ -196,7 +196,6 @@ StrategyEvaluationIsValid(StrategyEvaluation evaluation)
}
return true;
}
-
#endif
#ifdef NOT_USED
@@ -255,7 +254,6 @@ StrategyTermEvaluate(StrategyTerm term,
return result;
}
-
#endif
/* ----------------
@@ -453,7 +451,6 @@ RelationInvokeStrategy(Relation relation,
/* not reached, just to make compiler happy */
return FALSE;
}
-
#endif
/* ----------------
@@ -552,7 +549,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
{
for (attIndex = 0; attIndex < maxAttributeNumber; attIndex++)
{
- Oid opclass = operatorClassObjectId[attIndex];
+ Oid opclass = operatorClassObjectId[attIndex];
RegProcedure *loc;
StrategyNumber support;
@@ -562,7 +559,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
{
tuple = SearchSysCache(AMPROCNUM,
ObjectIdGetDatum(opclass),
- Int16GetDatum(support+1),
+ Int16GetDatum(support + 1),
0, 0);
if (HeapTupleIsValid(tuple))
{
@@ -581,7 +578,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
/* Now load the strategy information for the index operators */
for (attIndex = 0; attIndex < maxAttributeNumber; attIndex++)
{
- Oid opclass = operatorClassObjectId[attIndex];
+ Oid opclass = operatorClassObjectId[attIndex];
StrategyMap map;
StrategyNumber strategy;
@@ -591,7 +588,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
for (strategy = 1; strategy <= maxStrategyNumber; strategy++)
{
- ScanKey mapentry = StrategyMapGetScanKeyEntry(map, strategy);
+ ScanKey mapentry = StrategyMapGetScanKeyEntry(map, strategy);
tuple = SearchSysCache(AMOPSTRATEGY,
ObjectIdGetDatum(opclass),
@@ -643,5 +640,4 @@ IndexStrategyDisplay(IndexStrategy indexStrategy,
}
}
}
-
#endif /* defined(ISTRATDEBUG) */
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index b1affe20188..a4ba5870534 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.42 2001/05/03 19:00:36 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.43 2001/10/25 05:49:21 momjian Exp $
*
* NOTES
*
@@ -25,7 +25,7 @@
* NOTE: although any negative int32 is acceptable for reporting "<",
* and any positive int32 is acceptable for reporting ">", routines
* that work on 32-bit or wider datatypes can't just return "a - b".
- * That could overflow and give the wrong answer. Also, one should not
+ * That could overflow and give the wrong answer. Also, one should not
* return INT_MIN to report "<", since some callers will negate the result.
*
* NOTE: it is critical that the comparison function impose a total order
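
The nbtcompare.c header comment touched above makes two easy-to-miss points: a comparator over 32-bit keys must not return a - b, because the subtraction can overflow and report the wrong sign, and it should not return INT_MIN, since some callers negate the result to flip scan direction. A runnable demonstration (the wraparound shown is what typical two's-complement machines do; formally it is undefined behavior):

    #include <stdint.h>
    #include <stdio.h>

    /* BROKEN: a - b wraps for, e.g., a = INT32_MAX, b = -1, and the
     * sign of the wrapped result is garbage. */
    static int32_t
    cmp_wrong(int32_t a, int32_t b)
    {
        return a - b;
    }

    /* Safe: branch instead of subtracting.  The results -1/0/1 also
     * survive negation by callers that reverse the sort direction,
     * which INT_MIN would not. */
    static int32_t
    cmp_right(int32_t a, int32_t b)
    {
        if (a > b)
            return 1;
        if (a < b)
            return -1;
        return 0;
    }

    int
    main(void)
    {
        /* INT32_MAX > -1, but the subtraction wraps negative */
        printf("wrong: %d\n", cmp_wrong(INT32_MAX, -1) > 0);    /* 0 */
        printf("right: %d\n", cmp_right(INT32_MAX, -1) > 0);    /* 1 */
        return 0;
    }
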
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index bc250cd48e8..1d3a7e82ab0 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.86 2001/09/29 23:49:51 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.87 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1100,7 +1100,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
* If we are not on the leaf level, we will be able to discard the key
* data from the first item that winds up on the right page.
*/
- if (! state->is_leaf)
+ if (!state->is_leaf)
rightfree += (int) firstrightitemsz -
(int) (MAXALIGN(sizeof(BTItemData)) + sizeof(ItemIdData));
@@ -1115,7 +1115,8 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
{
/*
* On a rightmost page, try to equalize right free space with
- * twice the left free space. See comments for _bt_findsplitloc.
+ * twice the left free space. See comments for
+ * _bt_findsplitloc.
*/
delta = (2 * leftfree) - rightfree;
}
@@ -1618,7 +1619,6 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
for (;;)
{
-
/*
* Read up to 2 more child pages and look for pointers to them in
* *saved* parent page
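
The _bt_checksplitloc hunk above keeps the rightmost-page heuristic: instead of aiming for an even split, try to leave the right sibling about twice as empty as the left, since ascending-key insertions will keep landing there, and score candidate split points by delta = (2 * leftfree) - rightfree. A toy scorer showing the effect (free-space numbers invented; lower score is better):

    #include <stdio.h>
    #include <stdlib.h>

    static int
    split_delta(int leftfree, int rightfree, int is_rightmost)
    {
        if (is_rightmost)
            return abs((2 * leftfree) - rightfree);
        return abs(leftfree - rightfree);   /* otherwise aim for 50/50 */
    }

    int
    main(void)
    {
        /* On a rightmost page a 30/70 free-space split scores better
         * than an even 50/50 one: */
        printf("%d vs %d\n",
               split_delta(30, 70, 1),      /* |60 - 70|  = 10 */
               split_delta(50, 50, 1));     /* |100 - 50| = 50 */
        return 0;
    }
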
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 376274c5621..2e6eb20cd4c 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.53 2001/07/15 22:48:16 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.54 2001/10/25 05:49:21 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -153,7 +153,6 @@ _bt_getroot(Relation rel, int access)
*/
if (metad->btm_root == P_NONE)
{
-
/*
* Get, initialize, write, and leave a lock of the appropriate
* type on the new root page. Since this is the first page in
@@ -209,7 +208,6 @@ _bt_getroot(Relation rel, int access)
}
else
{
-
/*
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
@@ -237,7 +235,6 @@ _bt_getroot(Relation rel, int access)
if (!P_ISROOT(rootopaque))
{
-
/*
* It happened, but if root page splitter failed to create new
* root page then we'll go in loop trying to call _bt_getroot
@@ -402,7 +399,6 @@ _bt_wrtnorelbuf(Relation rel, Buffer buf)
void
_bt_pageinit(Page page, Size size)
{
-
/*
* Cargo_cult programming -- don't really need this to be zero, but
* creating new pages is an infrequent occurrence and it makes me feel
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index b1426456241..e49f06b1c38 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.82 2001/07/15 22:48:16 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.83 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,6 +37,7 @@ typedef struct
bool haveDead;
Relation heapRel;
BTSpool *spool;
+
/*
	 * spool2 is needed only when the index is a unique index. Dead
* tuples are put into spool2 instead of spool in order to avoid
@@ -58,11 +59,11 @@ bool FixBTree = true;
static void _bt_restscan(IndexScanDesc scan);
static void btbuildCallback(Relation index,
- HeapTuple htup,
- Datum *attdata,
- char *nulls,
- bool tupleIsAlive,
- void *state);
+ HeapTuple htup,
+ Datum *attdata,
+ char *nulls,
+ bool tupleIsAlive,
+ void *state);
/*
@@ -134,6 +135,7 @@ btbuild(PG_FUNCTION_ARGS)
if (buildstate.usefast)
{
buildstate.spool = _bt_spoolinit(index, indexInfo->ii_Unique);
+
/*
* Different from spool, the uniqueness isn't checked for spool2.
*/
@@ -214,7 +216,7 @@ btbuildCallback(Relation index,
bool tupleIsAlive,
void *state)
{
- BTBuildState *buildstate = (BTBuildState *) state;
+ BTBuildState *buildstate = (BTBuildState *) state;
IndexTuple itup;
BTItem btitem;
InsertIndexResult res;
@@ -226,9 +228,9 @@ btbuildCallback(Relation index,
btitem = _bt_formitem(itup);
/*
- * if we are doing bottom-up btree build, we insert the index into
- * a spool file for subsequent processing. otherwise, we insert
- * into the btree.
+ * if we are doing bottom-up btree build, we insert the index into a
+ * spool file for subsequent processing. otherwise, we insert into
+ * the btree.
*/
if (buildstate->usefast)
{
@@ -305,7 +307,6 @@ btgettuple(PG_FUNCTION_ARGS)
if (ItemPointerIsValid(&(scan->currentItemData)))
{
-
/*
* Restore scan position using heap TID returned by previous call
* to btgettuple(). _bt_restscan() re-grabs the read lock on the
@@ -321,7 +322,7 @@ btgettuple(PG_FUNCTION_ARGS)
* Save heap TID to use it in _bt_restscan. Then release the read
* lock on the buffer so that we aren't blocking other backends.
*
- * NOTE: we do keep the pin on the buffer! This is essential to ensure
+ * NOTE: we do keep the pin on the buffer! This is essential to ensure
* that someone else doesn't delete the index entry we are stopped on.
*/
if (res)
@@ -362,7 +363,6 @@ btrescan(PG_FUNCTION_ARGS)
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
bool fromEnd = PG_GETARG_BOOL(1);
-
#endif
ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
@@ -547,7 +547,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
void *callback_state = (void *) PG_GETARG_POINTER(2);
IndexBulkDeleteResult *result;
- BlockNumber num_pages;
+ BlockNumber num_pages;
double tuples_removed;
double num_index_tuples;
RetrieveIndexResult res;
@@ -559,15 +559,16 @@ btbulkdelete(PG_FUNCTION_ARGS)
num_index_tuples = 0;
/*
- * We use a standard IndexScanDesc scan object, but to speed up the loop,
- * we skip most of the wrapper layers of index_getnext and instead call
- * _bt_step directly. This implies holding buffer lock on a target page
- * throughout the loop over the page's tuples. Initially, we have a read
- * lock acquired by _bt_step when we stepped onto the page. If we find
- * a tuple we need to delete, we trade in the read lock for an exclusive
- * write lock; after that, we hold the write lock until we step off the
- * page (fortunately, _bt_relbuf doesn't care which kind of lock it's
- * releasing). This should minimize the amount of work needed per page.
+ * We use a standard IndexScanDesc scan object, but to speed up the
+ * loop, we skip most of the wrapper layers of index_getnext and
+ * instead call _bt_step directly. This implies holding buffer lock
+ * on a target page throughout the loop over the page's tuples.
+ * Initially, we have a read lock acquired by _bt_step when we stepped
+ * onto the page. If we find a tuple we need to delete, we trade in
+ * the read lock for an exclusive write lock; after that, we hold the
+ * write lock until we step off the page (fortunately, _bt_relbuf
+ * doesn't care which kind of lock it's releasing). This should
+ * minimize the amount of work needed per page.
*/
scan = index_beginscan(rel, false, 0, (ScanKey) NULL);
so = (BTScanOpaque) scan->opaque;
@@ -579,7 +580,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
if (res != NULL)
{
Buffer buf;
- BlockNumber lockedBlock = InvalidBlockNumber;
+ BlockNumber lockedBlock = InvalidBlockNumber;
pfree(res);
/* we have the buffer pinned and locked */
@@ -589,11 +590,11 @@ btbulkdelete(PG_FUNCTION_ARGS)
do
{
Page page;
- BlockNumber blkno;
+ BlockNumber blkno;
OffsetNumber offnum;
BTItem btitem;
IndexTuple itup;
- ItemPointer htup;
+ ItemPointer htup;
/* current is the next index tuple */
blkno = ItemPointerGetBlockNumber(current);
@@ -607,9 +608,10 @@ btbulkdelete(PG_FUNCTION_ARGS)
{
/*
* If this is first deletion on this page, trade in read
- * lock for a really-exclusive write lock. Then, step back
- * one and re-examine the item, because someone else might
- * have inserted an item while we weren't holding the lock!
+ * lock for a really-exclusive write lock. Then, step
+ * back one and re-examine the item, because someone else
+ * might have inserted an item while we weren't holding
+ * the lock!
*/
if (blkno != lockedBlock)
{
@@ -632,8 +634,8 @@ btbulkdelete(PG_FUNCTION_ARGS)
* We need to back up the scan one item so that the next
* cycle will re-examine the same offnum on this page.
*
- * For now, just hack the current-item index. Will need
- * to be smarter when deletion includes removal of empty
+ * For now, just hack the current-item index. Will need to
+ * be smarter when deletion includes removal of empty
* index pages.
*/
current->ip_posid--;
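
The btbulkdelete comments re-wrapped above describe two tricks: trade the read lock for a write lock only on the first deletion on a page, and after each deletion back the scan up one item so the slot is re-examined, since a concurrent insert may have shifted things while the lock was briefly released. The step-back trick is easy to show on a plain array (illustrative only, no locks):

    #include <stdio.h>

    /* Delete every value equal to 'dead' from items[0..n).  After
     * removing items[i] we do NOT advance i, so whatever shifted into
     * slot i gets re-examined on the next cycle: the same "back up one
     * and re-examine" move as in btbulkdelete. */
    static int
    bulk_delete(int *items, int n, int dead)
    {
        int i = 0;

        while (i < n)
        {
            if (items[i] == dead)
            {
                for (int j = i; j + 1 < n; j++)
                    items[j] = items[j + 1];    /* compact the "page" */
                n--;                            /* stay at slot i */
            }
            else
                i++;
        }
        return n;
    }

    int
    main(void)
    {
        int items[] = {1, 7, 7, 3, 7};
        int n = bulk_delete(items, 5, 7);

        for (int i = 0; i < n; i++)
            printf("%d ", items[i]);            /* 1 3 */
        printf("\n");
        return 0;
    }
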
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 86ff810845c..c1a82f2b856 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.68 2001/10/06 23:21:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.69 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -589,10 +589,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* At this point we are positioned at the first item >= scan key, or
- * possibly at the end of a page on which all the existing items are
- * greater than the scan key and we know that everything on later pages
- * is less than or equal to scan key.
- *
+ * possibly at the end of a page on which all the existing items are
+ * greater than the scan key and we know that everything on later
+ * pages is less than or equal to scan key.
+ *
* We could step forward in the latter case, but that'd be a waste of
* time if we want to scan backwards. So, it's now time to examine
* the scan strategy to find the exact place to start the scan.
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 2aca6bf7cfc..4b327bff45f 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -35,7 +35,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.60 2001/03/22 03:59:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.61 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -108,7 +108,7 @@ static void _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2);
/*
* create and initialize a spool structure
*/
-BTSpool *
+BTSpool *
_bt_spoolinit(Relation index, bool isunique)
{
BTSpool *btspool = (BTSpool *) palloc(sizeof(BTSpool));
@@ -354,7 +354,6 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
if (pgspc < btisz || pgspc < state->btps_full)
{
-
/*
* Item won't fit on this page, or we feel the page is full enough
* already. Finish off the page and write it out.
@@ -544,7 +543,6 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
if (merge)
{
-
/*
* Another BTSpool for dead tuples exists. Now we have to merge
* btspool and btspool2.
diff --git a/src/backend/access/nbtree/nbtstrat.c b/src/backend/access/nbtree/nbtstrat.c
index 4045496979c..533af33681a 100644
--- a/src/backend/access/nbtree/nbtstrat.c
+++ b/src/backend/access/nbtree/nbtstrat.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtstrat.c,v 1.14 2001/05/30 19:53:40 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtstrat.c,v 1.15 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -134,5 +134,4 @@ _bt_invokestrat(Relation rel,
return (RelationInvokeStrategy(rel, &BTEvaluationData, attno, strat,
left, right));
}
-
#endif
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 86d2e3cf8fa..55c4c323001 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.46 2001/10/06 23:21:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.47 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -221,7 +221,6 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
/* We can short-circuit most of the work if there's just one key */
if (numberOfKeys == 1)
{
-
/*
* We don't use indices for 'A is null' and 'A is not null'
* currently and 'A < = > <> NULL' will always fail - so qual is
@@ -317,7 +316,6 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
else
{
-
/*
* No "=" for this key, so we're done with required keys
*/
diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c
index 42878248b00..0e8305bdfba 100644
--- a/src/backend/access/rtree/rtree.c
+++ b/src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.65 2001/10/06 23:21:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.66 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,9 +58,9 @@ typedef struct SPLITVEC
/* for sorting tuples by cost, for picking split */
typedef struct SPLITCOST
{
- OffsetNumber offset_number;
- float cost_differential;
- bool choose_left;
+ OffsetNumber offset_number;
+ float cost_differential;
+ bool choose_left;
} SPLITCOST;
typedef struct RTSTATE
@@ -79,11 +79,11 @@ typedef struct
/* non-export function prototypes */
static void rtbuildCallback(Relation index,
- HeapTuple htup,
- Datum *attdata,
- char *nulls,
- bool tupleIsAlive,
- void *state);
+ HeapTuple htup,
+ Datum *attdata,
+ char *nulls,
+ bool tupleIsAlive,
+ void *state);
static InsertIndexResult rtdoinsert(Relation r, IndexTuple itup,
RTSTATE *rtstate);
static void rttighten(Relation r, RTSTACK *stk, Datum datum, int att_size,
@@ -100,7 +100,7 @@ static OffsetNumber choose(Relation r, Page p, IndexTuple it,
RTSTATE *rtstate);
static int nospace(Page p, IndexTuple it);
static void initRtstate(RTSTATE *rtstate, Relation index);
-static int qsort_comp_splitcost(const void *a, const void *b);
+static int qsort_comp_splitcost(const void *a, const void *b);
/*
@@ -178,7 +178,7 @@ rtbuildCallback(Relation index,
bool tupleIsAlive,
void *state)
{
- RTBuildState *buildstate = (RTBuildState *) state;
+ RTBuildState *buildstate = (RTBuildState *) state;
IndexTuple itup;
InsertIndexResult res;
@@ -194,11 +194,11 @@ rtbuildCallback(Relation index,
}
/*
- * Since we already have the index relation locked, we call
- * rtdoinsert directly. Normal access method calls dispatch
- * through rtinsert, which locks the relation for write. This is
- * the right thing to do if you're inserting single tups, but not
- * when you're initializing the whole index at once.
+ * Since we already have the index relation locked, we call rtdoinsert
+ * directly. Normal access method calls dispatch through rtinsert,
+ * which locks the relation for write. This is the right thing to do
+ * if you're inserting single tups, but not when you're initializing
+ * the whole index at once.
*/
res = rtdoinsert(index, itup, &buildstate->rtState);
@@ -223,6 +223,7 @@ rtinsert(PG_FUNCTION_ARGS)
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
char *nulls = (char *) PG_GETARG_POINTER(2);
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
#endif
@@ -249,7 +250,7 @@ rtinsert(PG_FUNCTION_ARGS)
/*
* Since rtree is not marked "amconcurrent" in pg_am, caller should
- * have acquired exclusive lock on index relation. We need no locking
+ * have acquired exclusive lock on index relation. We need no locking
* here.
*/
@@ -376,9 +377,8 @@ rttighten(Relation r,
PointerGetDatum(&newd_size));
/*
- * If newd_size == 0 we have degenerate rectangles, so we
- * don't know if there was any change, so we have to
- * assume there was.
+ * If newd_size == 0 we have degenerate rectangles, so we don't know
+ * if there was any change, so we have to assume there was.
*/
if ((newd_size == 0) || (newd_size != old_size))
{
@@ -386,7 +386,6 @@ rttighten(Relation r,
if (td->attrs[0]->attlen < 0)
{
-
/*
* This is an internal page, so 'oldud' had better be a union
* (constant-length) key, too. (See comment below.)
@@ -500,10 +499,10 @@ rtdosplit(Relation r,
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
/*
- * spl_left contains a list of the offset numbers of the
- * tuples that will go to the left page. For each offset
- * number, get the tuple item, then add the item to the
- * left page. Similarly for the right side.
+ * spl_left contains a list of the offset numbers of the tuples that
+ * will go to the left page. For each offset number, get the tuple
+ * item, then add the item to the left page. Similarly for the right
+ * side.
*/
/* fill left node */
@@ -527,7 +526,7 @@ rtdosplit(Relation r,
if (i == newitemoff)
ItemPointerSet(&(res->pointerData), lbknum, leftoff);
- spl_left++; /* advance in left split vector */
+ spl_left++; /* advance in left split vector */
}
/* fill right node */
@@ -551,7 +550,7 @@ rtdosplit(Relation r,
if (i == newitemoff)
ItemPointerSet(&(res->pointerData), rbknum, rightoff);
- spl_right++; /* advance in right split vector */
+ spl_right++; /* advance in right split vector */
}
/* Make sure we consumed all of the split vectors, and release 'em */
@@ -764,9 +763,10 @@ rtpicksplit(Relation r,
right_avail_space;
int total_num_tuples,
num_tuples_without_seeds,
- max_after_split; /* in Guttman's lingo, (M - m) */
- float diff; /* diff between cost of putting tuple left or right */
- SPLITCOST *cost_vector;
+ max_after_split; /* in Guttman's lingo, (M - m) */
+ float diff; /* diff between cost of putting tuple left
+ * or right */
+ SPLITCOST *cost_vector;
int n;
/*
@@ -852,7 +852,6 @@ rtpicksplit(Relation r,
if (firsttime)
{
-
/*
* There is no possible split except to put the new item on its
* own page. Since we still have to compute the union rectangles,
@@ -885,25 +884,25 @@ rtpicksplit(Relation r,
/*
* Now split up the regions between the two seeds.
*
- * The cost_vector array will contain hints for determining where
- * each tuple should go. Each record in the array will contain
- * a boolean, choose_left, that indicates which node the tuple
- * prefers to be on, and the absolute difference in cost between
- * putting the tuple in its favored node and in the other node.
+ * The cost_vector array will contain hints for determining where each
+ * tuple should go. Each record in the array will contain a boolean,
+ * choose_left, that indicates which node the tuple prefers to be on,
+ * and the absolute difference in cost between putting the tuple in
+ * its favored node and in the other node.
*
* Later, we will sort the cost_vector in descending order by cost
- * difference, and consider the tuples in that order for
- * placement. That way, the tuples that *really* want to be in
- * one node or the other get to choose first, and the tuples that
- * don't really care choose last.
+ * difference, and consider the tuples in that order for placement.
+ * That way, the tuples that *really* want to be in one node or the
+ * other get to choose first, and the tuples that don't really care
+ * choose last.
*
- * First, build the cost_vector array. The new index tuple will
- * also be handled in this loop, and represented in the array,
- * with i==newitemoff.
+ * First, build the cost_vector array. The new index tuple will also be
+ * handled in this loop, and represented in the array, with
+ * i==newitemoff.
*
- * In the case of variable size tuples it is possible that we only
- * have the two seeds and no other tuples, in which case we don't
- * do any of this cost_vector stuff.
+ * In the case of variable size tuples it is possible that we only have
+ * the two seeds and no other tuples, in which case we don't do any of
+ * this cost_vector stuff.
*/
/* to keep compiler quiet */
@@ -943,21 +942,21 @@ rtpicksplit(Relation r,
}
/*
- * Sort the array. The function qsort_comp_splitcost is
- * set up "backwards", to provided descending order.
+ * Sort the array. The function qsort_comp_splitcost is set up
+ * "backwards", to provided descending order.
*/
qsort(cost_vector, num_tuples_without_seeds, sizeof(SPLITCOST),
&qsort_comp_splitcost);
}
/*
- * Now make the final decisions about where each tuple will go,
- * and build the vectors to return in the SPLITVEC record.
+ * Now make the final decisions about where each tuple will go, and
+ * build the vectors to return in the SPLITVEC record.
*
- * The cost_vector array contains (descriptions of) all the
- * tuples, in the order that we want to consider them, so we
- * we just iterate through it and place each tuple in left
- * or right nodes, according to the criteria described below.
+ * The cost_vector array contains (descriptions of) all the tuples, in
+	 * the order that we want to consider them, so we just iterate
+ * through it and place each tuple in left or right nodes, according
+ * to the criteria described below.
*/
left = v->spl_left;
@@ -965,9 +964,9 @@ rtpicksplit(Relation r,
right = v->spl_right;
v->spl_nright = 0;
- /* Place the seeds first.
- * left avail space, left union, right avail space, and right
- * union have already been adjusted for the seeds.
+ /*
+ * Place the seeds first. left avail space, left union, right avail
+ * space, and right union have already been adjusted for the seeds.
*/
*left++ = seed_1;
@@ -983,8 +982,8 @@ rtpicksplit(Relation r,
choose_left;
/*
- * We need to figure out which page needs the least
- * enlargement in order to store the item.
+ * We need to figure out which page needs the least enlargement in
+ * order to store the item.
*/
i = cost_vector[n].offset_number;
@@ -1019,22 +1018,22 @@ rtpicksplit(Relation r,
* the new item.)
*
* Guttman's algorithm actually has two factors to consider (in
- * order): 1. if one node has so many tuples already assigned to
+ * order): 1. if one node has so many tuples already assigned to
* it that the other needs all the rest in order to satisfy the
- * condition that neither node has fewer than m tuples, then
- * that is decisive; 2. otherwise, choose the page that shows
- * the smaller enlargement of its union area.
+ * condition that neither node has fewer than m tuples, then that
+ * is decisive; 2. otherwise, choose the page that shows the
+ * smaller enlargement of its union area.
*
- * I have chosen m = M/2, where M is the maximum number of
- * tuples on a page. (Actually, this is only strictly
- * true for fixed size tuples. For variable size tuples,
- * there still might have to be only one tuple on a page,
- * if it is really big. But even with variable size
- * tuples we still try to get m as close as possible to M/2.)
+ * I have chosen m = M/2, where M is the maximum number of tuples on
+ * a page. (Actually, this is only strictly true for fixed size
+ * tuples. For variable size tuples, there still might have to be
+ * only one tuple on a page, if it is really big. But even with
+ * variable size tuples we still try to get m as close as possible
+ * to M/2.)
*
- * The question of which page shows the smaller enlargement of
- * its union area has already been answered, and the answer
- * stored in the choose_left field of the SPLITCOST record.
+ * The question of which page shows the smaller enlargement of its
+ * union area has already been answered, and the answer stored in
+ * the choose_left field of the SPLITCOST record.
*/
left_feasible = (left_avail_space >= item_1_sz &&
((left_avail_space - item_1_sz) >= newitemsz ||
@@ -1045,10 +1044,10 @@ rtpicksplit(Relation r,
if (left_feasible && right_feasible)
{
/*
- * Both feasible, use Guttman's algorithm.
- * First check the m condition described above, and if
- * that doesn't apply, choose the page with the smaller
- * enlargement of its union area.
+ * Both feasible, use Guttman's algorithm. First check the m
+ * condition described above, and if that doesn't apply,
+ * choose the page with the smaller enlargement of its union
+ * area.
*/
if (v->spl_nleft > max_after_split)
choose_left = false;
@@ -1064,7 +1063,7 @@ rtpicksplit(Relation r,
else
{
elog(ERROR, "rtpicksplit: failed to find a workable page split");
- choose_left = false;/* keep compiler quiet */
+ choose_left = false; /* keep compiler quiet */
}
if (choose_left)
@@ -1090,9 +1089,7 @@ rtpicksplit(Relation r,
}
if (num_tuples_without_seeds > 0)
- {
pfree(cost_vector);
- }
*left = *right = InvalidOffsetNumber; /* add ending sentinels */
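
The rewrapped comments in this hunk spell out the per-tuple decision rule: the m = M/2 floor is checked first and is decisive; only when neither side is forced does the precomputed choose_left hint (smaller union enlargement) apply. A hedged standalone sketch of just that rule, with the page-space feasibility checks omitted:

#include <stdbool.h>
#include <stdio.h>

/*
 * Decide which page gets the next tuple, per the comment above.
 * max_after_split is Guttman's (M - m); nleft and nright count tuples
 * already placed; prefer_left is the SPLITCOST choose_left hint.
 * All values here are illustrative.
 */
static bool
place_left(int nleft, int nright, int max_after_split, bool prefer_left)
{
    if (nleft > max_after_split)        /* left is full enough that */
        return false;                   /* right needs all the rest */
    else if (nright > max_after_split)  /* ... and vice versa */
        return true;
    return prefer_left;                 /* otherwise: smaller enlargement */
}

int
main(void)
{
    /* left already holds more than (M - m) = 5 tuples: forced right */
    printf("%s\n", place_left(6, 2, 5, true) ? "left" : "right");
    /* neither floor applies; the cost hint decides */
    printf("%s\n", place_left(3, 2, 5, true) ? "left" : "right");
    return 0;
}
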
@@ -1189,7 +1186,7 @@ rtbulkdelete(PG_FUNCTION_ARGS)
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
void *callback_state = (void *) PG_GETARG_POINTER(2);
IndexBulkDeleteResult *result;
- BlockNumber num_pages;
+ BlockNumber num_pages;
double tuples_removed;
double num_index_tuples;
RetrieveIndexResult res;
@@ -1200,7 +1197,7 @@ rtbulkdelete(PG_FUNCTION_ARGS)
/*
* Since rtree is not marked "amconcurrent" in pg_am, caller should
- * have acquired exclusive lock on index relation. We need no locking
+ * have acquired exclusive lock on index relation. We need no locking
* here.
*/
@@ -1279,9 +1276,10 @@ initRtstate(RTSTATE *rtstate, Relation index)
static int
qsort_comp_splitcost(const void *a, const void *b)
{
- float diff =
- ((SPLITCOST *)a)->cost_differential -
- ((SPLITCOST *)b)->cost_differential;
+ float diff =
+ ((SPLITCOST *) a)->cost_differential -
+ ((SPLITCOST *) b)->cost_differential;
+
if (diff < 0)
return 1;
else if (diff > 0)
@@ -1342,7 +1340,6 @@ _rtdump(Relation r)
ReleaseBuffer(buf);
}
}
-
#endif /* defined RTDEBUG */
void
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index 1311cfdc29a..c6883fd041b 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.38 2001/07/15 22:48:16 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.39 2001/10/25 05:49:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -380,7 +380,6 @@ adjustiptr(IndexScanDesc s,
}
else
{
-
/*
* remember that we're before the current
* tuple
diff --git a/src/backend/access/rtree/rtstrat.c b/src/backend/access/rtree/rtstrat.c
index 74ee6a39a46..41de4a4e3e6 100644
--- a/src/backend/access/rtree/rtstrat.c
+++ b/src/backend/access/rtree/rtstrat.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtstrat.c,v 1.17 2001/05/30 19:53:40 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtstrat.c,v 1.18 2001/10/25 05:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -163,7 +163,7 @@ static StrategyExpression RTEvaluationExpressions[RTNStrategies] = {
NULL, /* express overlap */
NULL, /* express overright */
NULL, /* express right */
- (StrategyExpression) RTEqualExpressionData, /* express same */
+ (StrategyExpression) RTEqualExpressionData, /* express same */
NULL, /* express contains */
NULL /* express contained-by */
};
@@ -221,7 +221,6 @@ RelationInvokeRTStrategy(Relation r,
return (RelationInvokeStrategy(r, &RTEvaluationData, attnum, s,
left, right));
}
-
#endif
RegProcedure
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index cd83da93ea6..1436b32aa16 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.4 2001/09/29 04:02:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.5 2001/10/25 05:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@
/*
* Defines for CLOG page and segment sizes. A page is the same BLCKSZ
- * as is used everywhere else in Postgres. The CLOG segment size can be
+ * as is used everywhere else in Postgres. The CLOG segment size can be
* chosen somewhat arbitrarily; we make it 1 million transactions by default,
* or 256Kb.
*
@@ -48,15 +48,15 @@
/* We need two bits per xact, so four xacts fit in a byte */
#define CLOG_BITS_PER_XACT 2
-#define CLOG_XACTS_PER_BYTE 4
-#define CLOG_XACTS_PER_PAGE (CLOG_BLCKSZ * CLOG_XACTS_PER_BYTE)
+#define CLOG_XACTS_PER_BYTE 4
+#define CLOG_XACTS_PER_PAGE (CLOG_BLCKSZ * CLOG_XACTS_PER_BYTE)
#define CLOG_XACT_BITMASK ((1 << CLOG_BITS_PER_XACT) - 1)
#define CLOG_XACTS_PER_SEGMENT 0x100000
#define CLOG_PAGES_PER_SEGMENT (CLOG_XACTS_PER_SEGMENT / CLOG_XACTS_PER_PAGE)
#define TransactionIdToPage(xid) ((xid) / (TransactionId) CLOG_XACTS_PER_PAGE)
-#define TransactionIdToPgIndex(xid) ((xid) % (TransactionId) CLOG_XACTS_PER_PAGE)
+#define TransactionIdToPgIndex(xid) ((xid) % (TransactionId) CLOG_XACTS_PER_PAGE)
#define TransactionIdToByte(xid) (TransactionIdToPgIndex(xid) / CLOG_XACTS_PER_BYTE)
#define TransactionIdToBIndex(xid) ((xid) % (TransactionId) CLOG_XACTS_PER_BYTE)
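
These macros encode the arithmetic stated in the comment above: 2 status bits per transaction means 4 transactions per byte, BLCKSZ * 4 per page (32,768 for an 8 KB page), and a 0x100000-transaction segment that occupies exactly 256 KB. A standalone check of the indexing (CLOG_BLCKSZ is hard-coded to 8192 here for illustration, and the TransactionId casts are dropped):

#include <stdio.h>

#define CLOG_BLCKSZ             8192    /* assumed page size */
#define CLOG_BITS_PER_XACT      2
#define CLOG_XACTS_PER_BYTE     4
#define CLOG_XACTS_PER_PAGE     (CLOG_BLCKSZ * CLOG_XACTS_PER_BYTE)
#define CLOG_XACTS_PER_SEGMENT  0x100000

#define TransactionIdToPage(xid)    ((xid) / CLOG_XACTS_PER_PAGE)
#define TransactionIdToPgIndex(xid) ((xid) % CLOG_XACTS_PER_PAGE)
#define TransactionIdToByte(xid)    (TransactionIdToPgIndex(xid) / CLOG_XACTS_PER_BYTE)
#define TransactionIdToBIndex(xid)  ((xid) % CLOG_XACTS_PER_BYTE)

int
main(void)
{
    unsigned int xid = 100000;

    printf("xacts/page = %d, segment bytes = %d\n",
           CLOG_XACTS_PER_PAGE,
           CLOG_XACTS_PER_SEGMENT * CLOG_BITS_PER_XACT / 8);
    printf("xid %u -> page %u, byte %u, bit-pair %u\n",
           xid, TransactionIdToPage(xid),
           TransactionIdToByte(xid), TransactionIdToBIndex(xid));
    return 0;
}
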
@@ -101,15 +101,15 @@
* the control lock.
*
* As with the regular buffer manager, it is possible for another process
- * to re-dirty a page that is currently being written out. This is handled
+ * to re-dirty a page that is currently being written out. This is handled
* by setting the page's state from WRITE_IN_PROGRESS to DIRTY. The writing
* process must notice this and not mark the page CLEAN when it's done.
*
* XLOG interactions: this module generates an XLOG record whenever a new
- * CLOG page is initialized to zeroes. Other writes of CLOG come from
+ * CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
- * on redo; so we need make no additional XLOG entry here. Also, the XLOG
+ * on redo; so we need make no additional XLOG entry here. Also, the XLOG
* is guaranteed flushed through the XLOG commit record before we are called
* to log a commit, so the WAL rule "write xlog before data" is satisfied
* automatically for commits, and we don't really care for aborts. Therefore,
@@ -120,11 +120,13 @@
typedef enum
{
- CLOG_PAGE_EMPTY, /* CLOG buffer is not in use */
- CLOG_PAGE_READ_IN_PROGRESS, /* CLOG page is being read in */
- CLOG_PAGE_CLEAN, /* CLOG page is valid and not dirty */
- CLOG_PAGE_DIRTY, /* CLOG page is valid but needs write */
- CLOG_PAGE_WRITE_IN_PROGRESS /* CLOG page is being written out in */
+ CLOG_PAGE_EMPTY,/* CLOG buffer is not in use */
+ CLOG_PAGE_READ_IN_PROGRESS, /* CLOG page is being read
+ * in */
+ CLOG_PAGE_CLEAN,/* CLOG page is valid and not dirty */
+ CLOG_PAGE_DIRTY,/* CLOG page is valid but needs write */
+	CLOG_PAGE_WRITE_IN_PROGRESS /* CLOG page is being
+								 * written out */
} ClogPageStatus;
/*
@@ -134,14 +136,15 @@ typedef struct ClogCtlData
{
/*
* Info for each buffer slot. Page number is undefined when status is
- * EMPTY. lru_count is essentially the number of operations since last
- * use of this page; the page with highest lru_count is the best candidate
- * to replace.
+ * EMPTY. lru_count is essentially the number of operations since
+ * last use of this page; the page with highest lru_count is the best
+ * candidate to replace.
*/
char *page_buffer[NUM_CLOG_BUFFERS];
- ClogPageStatus page_status[NUM_CLOG_BUFFERS];
+ ClogPageStatus page_status[NUM_CLOG_BUFFERS];
int page_number[NUM_CLOG_BUFFERS];
- unsigned int page_lru_count[NUM_CLOG_BUFFERS];
+ unsigned int page_lru_count[NUM_CLOG_BUFFERS];
+
/*
* latest_page_number is the page number of the current end of the
* CLOG; this is not critical data, since we use it only to avoid
@@ -157,7 +160,7 @@ static ClogCtlData *ClogCtl = NULL;
* The value is automatically inherited by backends via fork, and
* doesn't need to be in shared memory.
*/
-static LWLockId ClogBufferLocks[NUM_CLOG_BUFFERS]; /* Per-buffer I/O locks */
+static LWLockId ClogBufferLocks[NUM_CLOG_BUFFERS]; /* Per-buffer I/O locks */
/*
* ClogDir is set during CLOGShmemInit and does not change thereafter.
@@ -166,7 +169,7 @@ static LWLockId ClogBufferLocks[NUM_CLOG_BUFFERS]; /* Per-buffer I/O locks */
*/
static char ClogDir[MAXPGPATH];
-#define ClogFileName(path, seg) \
+#define ClogFileName(path, seg) \
snprintf(path, MAXPGPATH, "%s/%04X", ClogDir, seg)
/*
@@ -430,7 +433,7 @@ ReadCLOGPage(int pageno)
LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);
Assert(ClogCtl->page_number[slotno] == pageno &&
- ClogCtl->page_status[slotno] == CLOG_PAGE_READ_IN_PROGRESS);
+ ClogCtl->page_status[slotno] == CLOG_PAGE_READ_IN_PROGRESS);
ClogCtl->page_status[slotno] = CLOG_PAGE_CLEAN;
@@ -447,7 +450,7 @@ ReadCLOGPage(int pageno)
*
* NOTE: only one write attempt is made here. Hence, it is possible that
* the page is still dirty at exit (if someone else re-dirtied it during
- * the write). However, we *do* attempt a fresh write even if the page
+ * the write). However, we *do* attempt a fresh write even if the page
* is already being written; this is for checkpoints.
*
* Control lock must be held at entry, and will be held at exit.
@@ -455,7 +458,7 @@ ReadCLOGPage(int pageno)
static void
WriteCLOGPage(int slotno)
{
- int pageno;
+ int pageno;
/* Do nothing if page does not need writing */
if (ClogCtl->page_status[slotno] != CLOG_PAGE_DIRTY &&
@@ -489,11 +492,12 @@ WriteCLOGPage(int slotno)
* update on this page will mark it dirty again. NB: we are assuming
* that read/write of the page status field is atomic, since we change
* the state while not holding control lock. However, we cannot set
- * this state any sooner, or we'd possibly fool a previous writer
- * into thinking he's successfully dumped the page when he hasn't.
- * (Scenario: other writer starts, page is redirtied, we come along and
- * set WRITE_IN_PROGRESS again, other writer completes and sets CLEAN
- * because redirty info has been lost, then we think it's clean too.)
+ * this state any sooner, or we'd possibly fool a previous writer into
+ * thinking he's successfully dumped the page when he hasn't.
+ * (Scenario: other writer starts, page is redirtied, we come along
+ * and set WRITE_IN_PROGRESS again, other writer completes and sets
+ * CLEAN because redirty info has been lost, then we think it's clean
+ * too.)
*/
ClogCtl->page_status[slotno] = CLOG_PAGE_WRITE_IN_PROGRESS;
@@ -523,7 +527,7 @@ WriteCLOGPage(int slotno)
static void
CLOGPhysicalReadPage(int pageno, int slotno)
{
- int segno = pageno / CLOG_PAGES_PER_SEGMENT;
+ int segno = pageno / CLOG_PAGES_PER_SEGMENT;
int rpageno = pageno % CLOG_PAGES_PER_SEGMENT;
int offset = rpageno * CLOG_BLCKSZ;
char path[MAXPGPATH];
@@ -533,9 +537,9 @@ CLOGPhysicalReadPage(int pageno, int slotno)
/*
* In a crash-and-restart situation, it's possible for us to receive
- * commands to set the commit status of transactions whose bits are
- * in already-truncated segments of the commit log (see notes in
- * CLOGPhysicalWritePage). Hence, if we are InRecovery, allow the
+ * commands to set the commit status of transactions whose bits are in
+ * already-truncated segments of the commit log (see notes in
+ * CLOGPhysicalWritePage). Hence, if we are InRecovery, allow the
* case where the file doesn't exist, and return zeroes instead.
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
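
The reflowed comment describes a deliberate recovery-time fallback: a replayed command may target a CLOG segment that was already truncated, so a missing file is treated as a page of zeroes rather than an error (the write path below makes the symmetric choice and creates the file). A rough standalone sketch of that behavior using POSIX open; InRecovery, the path, and the error handling are simplified stand-ins for the real BasicOpenFile/elog code:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int InRecovery = 1;      /* stand-in for the backend's flag */

/* Return 0 and fill 'page' on success; -1 on a genuine error. */
static int
read_clog_page(const char *path, char *page, int pagesz)
{
    int fd = open(path, O_RDWR);

    if (fd < 0)
    {
        if (InRecovery && errno == ENOENT)
        {
            memset(page, 0, pagesz);    /* truncated segment: all zeroes */
            return 0;
        }
        return -1;
    }
    /* ... lseek to the page offset and read() it here ... */
    close(fd);
    return 0;
}

int
main(void)
{
    char page[8192];

    printf("%d\n", read_clog_page("/no/such/clog/segment",
                                  page, (int) sizeof(page)));
    return 0;
}
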
@@ -569,7 +573,7 @@ CLOGPhysicalReadPage(int pageno, int slotno)
static void
CLOGPhysicalWritePage(int pageno, int slotno)
{
- int segno = pageno / CLOG_PAGES_PER_SEGMENT;
+ int segno = pageno / CLOG_PAGES_PER_SEGMENT;
int rpageno = pageno % CLOG_PAGES_PER_SEGMENT;
int offset = rpageno * CLOG_BLCKSZ;
char path[MAXPGPATH];
@@ -578,16 +582,17 @@ CLOGPhysicalWritePage(int pageno, int slotno)
ClogFileName(path, segno);
/*
- * If the file doesn't already exist, we should create it. It is possible
- * for this to need to happen when writing a page that's not first in
- * its segment; we assume the OS can cope with that. (Note: it might seem
- * that it'd be okay to create files only when ZeroCLOGPage is called for
- * the first page of a segment. However, if after a crash and restart
- * the REDO logic elects to replay the log from a checkpoint before the
- * latest one, then it's possible that we will get commands to set
- * transaction status of transactions that have already been truncated
- * from the commit log. Easiest way to deal with that is to accept
- * references to nonexistent files here and in CLOGPhysicalReadPage.)
+ * If the file doesn't already exist, we should create it. It is
+ * possible for this to need to happen when writing a page that's not
+ * first in its segment; we assume the OS can cope with that. (Note:
+ * it might seem that it'd be okay to create files only when
+ * ZeroCLOGPage is called for the first page of a segment. However,
+ * if after a crash and restart the REDO logic elects to replay the
+ * log from a checkpoint before the latest one, then it's possible
+ * that we will get commands to set transaction status of transactions
+ * that have already been truncated from the commit log. Easiest way
+ * to deal with that is to accept references to nonexistent files here
+ * and in CLOGPhysicalReadPage.)
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
@@ -649,16 +654,15 @@ SelectLRUCLOGPage(int pageno)
}
/*
- * If we find any EMPTY slot, just select that one.
- * Else locate the least-recently-used slot that isn't the
- * latest CLOG page.
+ * If we find any EMPTY slot, just select that one. Else locate
+ * the least-recently-used slot that isn't the latest CLOG page.
*/
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
if (ClogCtl->page_status[slotno] == CLOG_PAGE_EMPTY)
return slotno;
if (ClogCtl->page_lru_count[slotno] > bestcount &&
- ClogCtl->page_number[slotno] != ClogCtl->latest_page_number)
+ ClogCtl->page_number[slotno] != ClogCtl->latest_page_number)
{
bestslot = slotno;
bestcount = ClogCtl->page_lru_count[slotno];
@@ -672,10 +676,10 @@ SelectLRUCLOGPage(int pageno)
return bestslot;
/*
- * We need to do I/O. Normal case is that we have to write it out,
- * but it's possible in the worst case to have selected a read-busy
- * page. In that case we use ReadCLOGPage to wait for the read to
- * complete.
+ * We need to do I/O. Normal case is that we have to write it
+ * out, but it's possible in the worst case to have selected a
+ * read-busy page. In that case we use ReadCLOGPage to wait for
+ * the read to complete.
*/
if (ClogCtl->page_status[bestslot] == CLOG_PAGE_READ_IN_PROGRESS)
(void) ReadCLOGPage(ClogCtl->page_number[bestslot]);
@@ -683,9 +687,9 @@ SelectLRUCLOGPage(int pageno)
WriteCLOGPage(bestslot);
/*
- * Now loop back and try again. This is the easiest way of dealing
- * with corner cases such as the victim page being re-dirtied while
- * we wrote it.
+ * Now loop back and try again. This is the easiest way of
+ * dealing with corner cases such as the victim page being
+ * re-dirtied while we wrote it.
*/
}
}
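
The comments rewrapped in this hunk describe the full replacement policy: any EMPTY slot wins immediately; otherwise the slot with the highest lru_count that is not holding the latest CLOG page becomes the victim, and the caller performs whatever I/O is needed and retries. A condensed standalone sketch of the victim search (slot contents are invented; the write-out-and-loop part is omitted):

#include <stdio.h>

#define NUM_CLOG_BUFFERS 8

enum { PAGE_EMPTY, PAGE_CLEAN, PAGE_DIRTY };

static int      page_status[NUM_CLOG_BUFFERS];
static int      page_number[NUM_CLOG_BUFFERS];
static unsigned page_lru_count[NUM_CLOG_BUFFERS];
static int      latest_page_number;

/* Pick an EMPTY slot if any; else the stalest slot not holding the
 * latest page.  (The real function then writes the victim out and
 * loops until it finds a free slot.) */
static int
select_victim_slot(void)
{
    int      bestslot = 0;
    unsigned bestcount = 0;

    for (int slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
    {
        if (page_status[slotno] == PAGE_EMPTY)
            return slotno;
        if (page_lru_count[slotno] > bestcount &&
            page_number[slotno] != latest_page_number)
        {
            bestslot = slotno;
            bestcount = page_lru_count[slotno];
        }
    }
    return bestslot;
}

int
main(void)
{
    for (int i = 0; i < NUM_CLOG_BUFFERS; i++)
    {
        page_status[i] = PAGE_CLEAN;
        page_number[i] = i;
        page_lru_count[i] = (unsigned) (NUM_CLOG_BUFFERS - i);
    }
    latest_page_number = 0;     /* slot 0 is stalest but holds latest page */
    printf("victim slot = %d\n", select_victim_slot());
    return 0;
}
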
@@ -736,6 +740,7 @@ CheckPointCLOG(void)
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
WriteCLOGPage(slotno);
+
/*
* We cannot assert that the slot is clean now, since another
* process might have re-dirtied it already. That's okay.
@@ -782,13 +787,13 @@ ExtendCLOG(TransactionId newestXact)
* Remove all CLOG segments before the one holding the passed transaction ID
*
* When this is called, we know that the database logically contains no
- * reference to transaction IDs older than oldestXact. However, we must
+ * reference to transaction IDs older than oldestXact. However, we must
* not truncate the CLOG until we have performed a checkpoint, to ensure
* that no such references remain on disk either; else a crash just after
* the truncation might leave us with a problem. Since CLOG segments hold
* a large number of transactions, the opportunity to actually remove a
* segment is fairly rare, and so it seems best not to do the checkpoint
- * unless we have confirmed that there is a removable segment. Therefore
+ * unless we have confirmed that there is a removable segment. Therefore
* we issue the checkpoint command here, not in higher-level code as might
* seem cleaner.
*/
@@ -813,15 +818,16 @@ TruncateCLOG(TransactionId oldestXact)
/*
* Scan CLOG shared memory and remove any pages preceding the cutoff
* page, to ensure we won't rewrite them later. (Any dirty pages
- * should have been flushed already during the checkpoint, we're
- * just being extra careful here.)
+ * should have been flushed already during the checkpoint, we're just
+ * being extra careful here.)
*/
LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);
restart:;
+
/*
- * While we are holding the lock, make an important safety check:
- * the planned cutoff point must be <= the current CLOG endpoint page.
+ * While we are holding the lock, make an important safety check: the
+ * planned cutoff point must be <= the current CLOG endpoint page.
* Otherwise we have already wrapped around, and proceeding with the
* truncation would risk removing the current CLOG segment.
*/
@@ -838,6 +844,7 @@ restart:;
continue;
if (!CLOGPagePrecedes(ClogCtl->page_number[slotno], cutoffPage))
continue;
+
/*
* If page is CLEAN, just change state to EMPTY (expected case).
*/
@@ -846,6 +853,7 @@ restart:;
ClogCtl->page_status[slotno] = CLOG_PAGE_EMPTY;
continue;
}
+
/*
* Hmm, we have (or may have) I/O operations acting on the page,
* so we've got to wait for them to finish and then start again.
@@ -928,9 +936,11 @@ CLOGPagePrecedes(int page1, int page2)
TransactionId xid1;
TransactionId xid2;
- xid1 = (TransactionId) page1 * CLOG_XACTS_PER_PAGE;
+ xid1 = (TransactionId) page1 *CLOG_XACTS_PER_PAGE;
+
xid1 += FirstNormalTransactionId;
- xid2 = (TransactionId) page2 * CLOG_XACTS_PER_PAGE;
+ xid2 = (TransactionId) page2 *CLOG_XACTS_PER_PAGE;
+
xid2 += FirstNormalTransactionId;
return TransactionIdPrecedes(xid1, xid2);
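
CLOGPagePrecedes maps each page number to a representative normal XID and defers to the wraparound-aware XID ordering, so page comparisons wrap exactly as XIDs do. (The "page1 *CLOG_XACTS_PER_PAGE" spacing above is a pgindent quirk, not a pointer: the indenter mistook the macro for a type name, and the expression is still an ordinary multiplication.) A standalone sketch with the precedes test inlined; it is shown in more detail after the TransactionIdPrecedes hunk below:

#include <stdint.h>
#include <stdio.h>

#define CLOG_XACTS_PER_PAGE      32768u /* 8 KB page, 4 xacts/byte */
#define FirstNormalTransactionId 3u

/* Wraparound-aware ordering for normal XIDs: true iff id1 precedes id2. */
static int
xid_precedes(uint32_t id1, uint32_t id2)
{
    return (int32_t) (id1 - id2) < 0;
}

static int
clog_page_precedes(int page1, int page2)
{
    uint32_t xid1 = (uint32_t) page1 * CLOG_XACTS_PER_PAGE + FirstNormalTransactionId;
    uint32_t xid2 = (uint32_t) page2 * CLOG_XACTS_PER_PAGE + FirstNormalTransactionId;

    return xid_precedes(xid1, xid2);
}

int
main(void)
{
    /* A numerically huge page can precede a small one after a wrap. */
    printf("%d\n", clog_page_precedes(5, 10));      /* 1: plain order */
    printf("%d\n", clog_page_precedes(130000, 5));  /* 1: wrapped around */
    return 0;
}
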
@@ -966,8 +976,8 @@ clog_redo(XLogRecPtr lsn, XLogRecord *record)
if (info == CLOG_ZEROPAGE)
{
- int pageno;
- int slotno;
+ int pageno;
+ int slotno;
memcpy(&pageno, XLogRecGetData(record), sizeof(int));
@@ -993,7 +1003,7 @@ clog_desc(char *buf, uint8 xl_info, char *rec)
if (info == CLOG_ZEROPAGE)
{
- int pageno;
+ int pageno;
memcpy(&pageno, rec, sizeof(int));
sprintf(buf + strlen(buf), "zeropage: %d", pageno);
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 2a73c045b76..bb94fc27a13 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.48 2001/08/26 16:55:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.49 2001/10/25 05:49:22 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -32,7 +32,7 @@ static void TransactionLogUpdate(TransactionId transactionId,
* ----------------
*/
static TransactionId cachedTestXid = InvalidTransactionId;
-static XidStatus cachedTestXidStatus;
+static XidStatus cachedTestXidStatus;
/* ----------------------------------------------------------------
@@ -56,8 +56,8 @@ TransactionLogTest(TransactionId transactionId, /* transaction id to test */
XidStatus xidstatus; /* recorded status of xid */
/*
- * Before going to the commit log manager, check our single item cache to
- * see if we didn't just check the transaction status a moment ago.
+ * Before going to the commit log manager, check our single item cache
+ * to see if we didn't just check the transaction status a moment ago.
*/
if (TransactionIdEquals(transactionId, cachedTestXid))
return (status == cachedTestXidStatus);
@@ -65,7 +65,7 @@ TransactionLogTest(TransactionId transactionId, /* transaction id to test */
/*
* Also, check to see if the transaction ID is a permanent one.
*/
- if (! TransactionIdIsNormal(transactionId))
+ if (!TransactionIdIsNormal(transactionId))
{
if (TransactionIdEquals(transactionId, BootstrapTransactionId))
return (status == TRANSACTION_STATUS_COMMITTED);
@@ -77,18 +77,18 @@ TransactionLogTest(TransactionId transactionId, /* transaction id to test */
/*
* Get the status.
*/
- xidstatus = TransactionIdGetStatus(transactionId);
-
- /*
- * DO NOT cache status for unfinished transactions!
- */
- if (xidstatus != TRANSACTION_STATUS_IN_PROGRESS)
- {
- TransactionIdStore(transactionId, &cachedTestXid);
- cachedTestXidStatus = xidstatus;
- }
-
- return (status == xidstatus);
+ xidstatus = TransactionIdGetStatus(transactionId);
+
+ /*
+ * DO NOT cache status for unfinished transactions!
+ */
+ if (xidstatus != TRANSACTION_STATUS_IN_PROGRESS)
+ {
+ TransactionIdStore(transactionId, &cachedTestXid);
+ cachedTestXidStatus = xidstatus;
+ }
+
+ return (status == xidstatus);
}
/* --------------------------------
@@ -197,7 +197,7 @@ TransactionIdIsInProgress(TransactionId transactionId)
return TransactionLogTest(transactionId, TRANSACTION_STATUS_IN_PROGRESS);
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* --------------------------------
* TransactionId Commit
@@ -246,7 +246,7 @@ TransactionIdPrecedes(TransactionId id1, TransactionId id2)
{
/*
* If either ID is a permanent XID then we can just do unsigned
- * comparison. If both are normal, do a modulo-2^31 comparison.
+ * comparison. If both are normal, do a modulo-2^31 comparison.
*/
int32 diff;
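
The comment above is the crux of transaction-ID wraparound handling: permanent XIDs (below FirstNormalTransactionId) compare as plain unsigned values, while two normal XIDs compare by the sign of their 32-bit difference, so every normal XID "precedes" the 2^31 IDs that follow it around the circle. A standalone sketch, taking FirstNormalTransactionId = 3 as in this era's headers:

#include <stdint.h>
#include <stdio.h>

#define FirstNormalTransactionId 3u

static int
transaction_id_precedes(uint32_t id1, uint32_t id2)
{
    if (id1 < FirstNormalTransactionId || id2 < FirstNormalTransactionId)
        return id1 < id2;               /* permanent XIDs: plain unsigned */

    return (int32_t) (id1 - id2) < 0;   /* normal XIDs: modulo-2^31 */
}

int
main(void)
{
    /* 4,000,000,000 precedes 100 once the counter has wrapped: */
    printf("%d\n", transaction_id_precedes(4000000000u, 100u));    /* 1 */
    /* but a permanent XID such as BootstrapTransactionId (1) never wraps: */
    printf("%d\n", transaction_id_precedes(1u, 4000000000u));      /* 1 */
    return 0;
}
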
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 048080a180f..5a56f47e064 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.46 2001/09/29 04:02:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.47 2001/10/25 05:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,7 +32,7 @@ VariableCache ShmemVariableCache = NULL;
TransactionId
GetNewTransactionId(void)
{
- TransactionId xid;
+ TransactionId xid;
/*
* During bootstrap initialization, we return the special bootstrap
@@ -48,32 +48,32 @@ GetNewTransactionId(void)
TransactionIdAdvance(ShmemVariableCache->nextXid);
/*
- * If we have just allocated the first XID of a new page of the
- * commit log, zero out that commit-log page before returning.
- * We must do this while holding XidGenLock, else another xact could
- * acquire and commit a later XID before we zero the page. Fortunately,
- * a page of the commit log holds 32K or more transactions, so we don't
- * have to do this very often.
+ * If we have just allocated the first XID of a new page of the commit
+ * log, zero out that commit-log page before returning. We must do
+ * this while holding XidGenLock, else another xact could acquire and
+ * commit a later XID before we zero the page. Fortunately, a page of
+ * the commit log holds 32K or more transactions, so we don't have to
+ * do this very often.
*/
ExtendCLOG(xid);
/*
- * Must set MyProc->xid before releasing XidGenLock. This ensures that
- * when GetSnapshotData calls ReadNewTransactionId, all active XIDs
- * before the returned value of nextXid are already present in the shared
- * PROC array. Else we have a race condition.
+ * Must set MyProc->xid before releasing XidGenLock. This ensures
+ * that when GetSnapshotData calls ReadNewTransactionId, all active
+ * XIDs before the returned value of nextXid are already present in
+ * the shared PROC array. Else we have a race condition.
*
* XXX by storing xid into MyProc without acquiring SInvalLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
- * might see a partially-set xid here. But holding both locks at once
- * would be a nasty concurrency hit (and in fact could cause a deadlock
- * against GetSnapshotData). So for now, assume atomicity. Note that
- * readers of PROC xid field should be careful to fetch the value only
- * once, rather than assume they can read it multiple times and get the
- * same answer each time.
+ * might see a partially-set xid here. But holding both locks at once
+ * would be a nasty concurrency hit (and in fact could cause a
+ * deadlock against GetSnapshotData). So for now, assume atomicity.
+ * Note that readers of PROC xid field should be careful to fetch the
+ * value only once, rather than assume they can read it multiple times
+ * and get the same answer each time.
*
- * A solution to the atomic-store problem would be to give each PROC
- * its own spinlock used only for fetching/storing that PROC's xid.
+ * A solution to the atomic-store problem would be to give each PROC its
+ * own spinlock used only for fetching/storing that PROC's xid.
* (SInvalLock would then mean primarily that PROCs couldn't be added/
* removed while holding the lock.)
*/
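
ExtendCLOG only has real work to do when the XID just handed out is the first one mapped to a fresh CLOG page, which reduces to a modulo test on the page index; that is why the zeroing happens at most once per 32K-plus transactions. A minimal sketch of the trigger condition (the real function also special-cases the very first normal XID):

#include <stdio.h>

#define CLOG_XACTS_PER_PAGE 32768u      /* 8 KB page, 4 xacts/byte */

/* True when xid is the first transaction on its CLOG page, i.e. the
 * page must be zeroed before any status can be recorded on it. */
static int
xid_opens_new_clog_page(unsigned xid)
{
    return xid % CLOG_XACTS_PER_PAGE == 0;
}

int
main(void)
{
    printf("%d %d\n",
           xid_opens_new_clog_page(65536),      /* 1: page boundary */
           xid_opens_new_clog_page(65537));     /* 0: mid-page */
    return 0;
}
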
@@ -91,7 +91,7 @@ GetNewTransactionId(void)
TransactionId
ReadNewTransactionId(void)
{
- TransactionId xid;
+ TransactionId xid;
/*
* During bootstrap initialization, we return the special bootstrap
@@ -117,16 +117,16 @@ static Oid lastSeenOid = InvalidOid;
Oid
GetNewObjectId(void)
{
- Oid result;
+ Oid result;
LWLockAcquire(OidGenLock, LW_EXCLUSIVE);
/*
* Check for wraparound of the OID counter. We *must* not return 0
* (InvalidOid); and as long as we have to check that, it seems a good
- * idea to skip over everything below BootstrapObjectIdData too. (This
- * basically just reduces the odds of OID collision right after a wrap
- * occurs.) Note we are relying on unsigned comparison here.
+ * idea to skip over everything below BootstrapObjectIdData too.
+ * (This basically just reduces the odds of OID collision right after
+ * a wrap occurs.) Note we are relying on unsigned comparison here.
*/
if (ShmemVariableCache->nextOid < ((Oid) BootstrapObjectIdData))
{
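
Because Oid is unsigned, wraparound is detected by the counter simply becoming numerically small again; bumping it to BootstrapObjectIdData skips both InvalidOid and the reserved low range in one move. A standalone sketch (the value 16384 for BootstrapObjectIdData is assumed here for illustration):

#include <stdio.h>

typedef unsigned int Oid;

#define InvalidOid            ((Oid) 0)
#define BootstrapObjectIdData ((Oid) 16384)     /* assumed value */

static Oid nextOid = 4294967290u;   /* counter about to wrap */

static Oid
get_new_object_id(void)
{
    /* Unsigned comparison: after a wrap, nextOid is small again and
     * gets skipped past InvalidOid and the reserved low range. */
    if (nextOid < BootstrapObjectIdData)
        nextOid = BootstrapObjectIdData;
    return nextOid++;
}

int
main(void)
{
    for (int i = 0; i < 8; i++)
        printf("%u\n", get_new_object_id());    /* wraps, then resumes at 16384 */
    return 0;
}
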
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index cf9791a8e84..f4dc01adfdb 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.112 2001/10/18 17:30:03 thomas Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.113 2001/10/25 05:49:22 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -300,7 +300,6 @@ IsTransactionState(void)
*/
return false;
}
-
#endif
/* --------------------------------
@@ -476,7 +475,6 @@ AtStart_Cache(void)
static void
AtStart_Locks(void)
{
-
/*
* at present, it is unknown to me what belongs here -cim 3/18/90
*
@@ -492,7 +490,6 @@ AtStart_Locks(void)
static void
AtStart_Memory(void)
{
-
/*
* We shouldn't have any transaction contexts already.
*/
@@ -717,8 +714,8 @@ RecordTransactionAbort(void)
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_ABORT, &rdata);
/*
- * There's no need for XLogFlush here, since the default assumption
- * would be that we aborted, anyway.
+ * There's no need for XLogFlush here, since the default
+ * assumption would be that we aborted, anyway.
*/
/* Mark the transaction aborted in clog */
@@ -756,7 +753,6 @@ AtAbort_Cache(void)
static void
AtAbort_Locks(void)
{
-
/*
* XXX What if ProcReleaseLocks() fails? (race condition?)
*
@@ -773,7 +769,6 @@ AtAbort_Locks(void)
static void
AtAbort_Memory(void)
{
-
/*
* Make sure we are in a valid context (not a child of
* TransactionCommandContext...). Note that it is possible for this
@@ -807,7 +802,6 @@ AtAbort_Memory(void)
static void
AtCleanup_Memory(void)
{
-
/*
* Now that we're "out" of a transaction, have the system allocate
* things in the top memory context instead of per-transaction
@@ -909,7 +903,6 @@ CurrentXactInProgress(void)
{
return CurrentTransactionState->state == TRANS_INPROGRESS;
}
-
#endif
/* --------------------------------
@@ -965,12 +958,11 @@ CommitTransaction(void)
* this must be done _before_ releasing locks we hold and _after_
* RecordTransactionCommit.
*
- * LWLockAcquire(SInvalLock) is required: UPDATE with xid 0 is blocked
- * by xid 1' UPDATE, xid 1 is doing commit while xid 2 gets snapshot -
- * if xid 2' GetSnapshotData sees xid 1 as running then it must see
- * xid 0 as running as well or it will see two tuple versions - one
- * deleted by xid 1 and one inserted by xid 0. See notes in
- * GetSnapshotData.
+ * LWLockAcquire(SInvalLock) is required: UPDATE with xid 0 is blocked by
+	 * xid 1's UPDATE, xid 1 is doing commit while xid 2 gets snapshot - if
+	 * xid 2's GetSnapshotData sees xid 1 as running then it must see xid 0
+ * as running as well or it will see two tuple versions - one deleted
+ * by xid 1 and one inserted by xid 0. See notes in GetSnapshotData.
*/
if (MyProc != (PROC *) NULL)
{
@@ -1002,7 +994,7 @@ CommitTransaction(void)
AtCommit_Memory();
AtEOXact_Files();
- SharedBufferChanged = false;/* safest place to do it */
+ SharedBufferChanged = false; /* safest place to do it */
/* Count transaction commit in statistics collector */
pgstat_count_xact_commit();
@@ -1032,8 +1024,8 @@ AbortTransaction(void)
/*
* Release any LW locks we might be holding as quickly as possible.
* (Regular locks, however, must be held till we finish aborting.)
- * Releasing LW locks is critical since we might try to grab them again
- * while cleaning up!
+ * Releasing LW locks is critical since we might try to grab them
+ * again while cleaning up!
*/
LWLockReleaseAll();
@@ -1105,7 +1097,7 @@ AbortTransaction(void)
AtEOXact_Files();
AtAbort_Locks();
- SharedBufferChanged = false;/* safest place to do it */
+ SharedBufferChanged = false; /* safest place to do it */
/* Count transaction abort in statistics collector */
pgstat_count_xact_rollback();
@@ -1155,7 +1147,6 @@ StartTransactionCommand(void)
switch (s->blockState)
{
-
/*
* if we aren't in a transaction block, we just do our usual
* start transaction.
@@ -1238,7 +1229,6 @@ CommitTransactionCommand(void)
switch (s->blockState)
{
-
/*
* if we aren't in a transaction block, we just do our usual
* transaction commit
@@ -1313,7 +1303,6 @@ AbortCurrentTransaction(void)
switch (s->blockState)
{
-
/*
* if we aren't in a transaction block, we just do the basic
* abort & cleanup transaction.
@@ -1429,7 +1418,6 @@ EndTransactionBlock(void)
*/
if (s->blockState == TBLOCK_INPROGRESS)
{
-
/*
* here we are in a transaction block which should commit when we
* get to the upcoming CommitTransactionCommand() so we set the
@@ -1442,7 +1430,6 @@ EndTransactionBlock(void)
if (s->blockState == TBLOCK_ABORT)
{
-
/*
* here, we are in a transaction block which aborted and since the
* AbortTransaction() was already done, we do whatever is needed
@@ -1480,7 +1467,6 @@ AbortTransactionBlock(void)
*/
if (s->blockState == TBLOCK_INPROGRESS)
{
-
/*
* here we were inside a transaction block something screwed up
* inside the system so we enter the abort state, do the abort
@@ -1502,7 +1488,6 @@ AbortTransactionBlock(void)
AbortTransaction();
s->blockState = TBLOCK_ENDABORT;
}
-
#endif
/* --------------------------------
@@ -1527,7 +1512,6 @@ UserAbortTransactionBlock(void)
if (s->blockState == TBLOCK_INPROGRESS)
{
-
/*
* here we were inside a transaction block and we got an abort
* command from the user, so we move to the abort state, do the
diff --git a/src/backend/access/transam/xid.c b/src/backend/access/transam/xid.c
index babe7c9a48b..cde2713b0f0 100644
--- a/src/backend/access/transam/xid.c
+++ b/src/backend/access/transam/xid.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: xid.c,v 1.33 2001/08/26 16:55:59 tgl Exp $
+ * $Id: xid.c,v 1.34 2001/10/25 05:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,6 +34,7 @@ Datum
xidout(PG_FUNCTION_ARGS)
{
TransactionId transactionId = PG_GETARG_TRANSACTIONID(0);
+
/* maximum 32 bit unsigned integer representation takes 10 chars */
char *str = palloc(11);
@@ -64,7 +65,7 @@ xid_age(PG_FUNCTION_ARGS)
TransactionId now = GetCurrentTransactionId();
/* Permanent XIDs are always infinitely old */
- if (! TransactionIdIsNormal(xid))
+ if (!TransactionIdIsNormal(xid))
PG_RETURN_INT32(INT_MAX);
PG_RETURN_INT32((int32) (now - xid));
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 057ee72d55a..8c162ab3f65 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.78 2001/09/29 04:02:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.79 2001/10/25 05:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,7 +97,7 @@ char XLOG_archive_dir[MAXPGPATH]; /* null string means
* delete 'em */
/*
- * XLOGfileslop is used in the code as the allowed "fuzz" in the number of
+ * XLOGfileslop is used in the code as the allowed "fuzz" in the number of
* preallocated XLOG segments --- we try to have at least XLOGfiles advance
* segments but no more than XLOGfiles+XLOGfileslop segments. This could
* be made a separate GUC variable, but at present I think it's sufficient
@@ -215,13 +215,13 @@ typedef struct XLogwrtRqst
{
XLogRecPtr Write; /* last byte + 1 to write out */
XLogRecPtr Flush; /* last byte + 1 to flush */
-} XLogwrtRqst;
+} XLogwrtRqst;
typedef struct XLogwrtResult
{
XLogRecPtr Write; /* last byte + 1 written out */
XLogRecPtr Flush; /* last byte + 1 flushed */
-} XLogwrtResult;
+} XLogwrtResult;
/*
* Shared state data for XLogInsert.
@@ -260,8 +260,9 @@ typedef struct XLogCtlData
/*
* These values do not change after startup, although the pointed-to
- * pages and xlblocks values certainly do. Permission to read/write the
- * pages and xlblocks values depends on WALInsertLock and WALWriteLock.
+ * pages and xlblocks values certainly do. Permission to read/write
+ * the pages and xlblocks values depends on WALInsertLock and
+ * WALWriteLock.
*/
char *pages; /* buffers for unwritten XLOG pages */
XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
@@ -428,8 +429,8 @@ static void XLogWrite(XLogwrtRqst WriteRqst);
static int XLogFileInit(uint32 log, uint32 seg,
bool *use_existent, bool use_lock);
static bool InstallXLogFileSegment(uint32 log, uint32 seg, char *tmppath,
- bool find_free, int max_advance,
- bool use_lock);
+ bool find_free, int max_advance,
+ bool use_lock);
static int XLogFileOpen(uint32 log, uint32 seg, bool econt);
static void PreallocXlogFiles(XLogRecPtr endptr);
static void MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr);
@@ -621,8 +622,8 @@ begin:;
SpinLockRelease_NoHoldoff(&XLogCtl->info_lck);
/*
- * If cache is half filled then try to acquire write lock and
- * do XLogWrite. Ignore any fractional blocks in performing this check.
+ * If cache is half filled then try to acquire write lock and do
+ * XLogWrite. Ignore any fractional blocks in performing this check.
*/
LogwrtRqst.Write.xrecoff -= LogwrtRqst.Write.xrecoff % BLCKSZ;
if (LogwrtRqst.Write.xlogid != LogwrtResult.Write.xlogid ||
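
The "ignore any fractional blocks" step above is just a round-down to a block boundary before the write positions are compared, so a request that ends mid-block does not count that block as writable yet. The arithmetic in isolation:

#include <stdio.h>

#define BLCKSZ 8192u

int
main(void)
{
    unsigned xrecoff = 3 * BLCKSZ + 1234;   /* request ends mid-block */

    xrecoff -= xrecoff % BLCKSZ;            /* drop the fractional block */
    printf("%u bytes = %u full blocks\n", xrecoff, xrecoff / BLCKSZ);
    return 0;
}
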
@@ -939,9 +940,7 @@ AdvanceXLInsertBuffer(void)
NewPageEndPtr.xrecoff = BLCKSZ;
}
else
- {
NewPageEndPtr.xrecoff += BLCKSZ;
- }
XLogCtl->xlblocks[nextidx] = NewPageEndPtr;
NewPage = (XLogPageHeader) (XLogCtl->pages + nextidx * BLCKSZ);
Insert->curridx = nextidx;
@@ -956,7 +955,7 @@ AdvanceXLInsertBuffer(void)
/* And fill the new page's header */
NewPage->xlp_magic = XLOG_PAGE_MAGIC;
- /* NewPage->xlp_info = 0; */ /* done by memset */
+ /* NewPage->xlp_info = 0; *//* done by memset */
NewPage->xlp_sui = ThisStartUpID;
NewPage->xlp_pageaddr.xlogid = NewPageEndPtr.xlogid;
NewPage->xlp_pageaddr.xrecoff = NewPageEndPtr.xrecoff - BLCKSZ;
@@ -985,7 +984,6 @@ XLogWrite(XLogwrtRqst WriteRqst)
while (XLByteLT(LogwrtResult.Write, WriteRqst.Write))
{
-
/*
* Make sure we're not ahead of the insert process. This could
* happen if we're passed a bogus WriteRqst.Write that is past the
@@ -1004,7 +1002,6 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
-
/*
* Switch to new logfile segment.
*/
@@ -1114,7 +1111,6 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) &&
XLByteLT(LogwrtResult.Flush, LogwrtResult.Write))
{
-
/*
* Could get here without iterating above loop, in which case we
* might have no open file or the wrong one. However, we do not
@@ -1174,11 +1170,11 @@ XLogFlush(XLogRecPtr record)
if (XLOG_DEBUG)
{
elog(DEBUG, "XLogFlush%s%s: request %X/%X; write %X/%X; flush %X/%X\n",
- (IsBootstrapProcessingMode()) ? "(bootstrap)" : "",
- (InRedo) ? "(redo)" : "",
- record.xlogid, record.xrecoff,
- LogwrtResult.Write.xlogid, LogwrtResult.Write.xrecoff,
- LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff);
+ (IsBootstrapProcessingMode()) ? "(bootstrap)" : "",
+ (InRedo) ? "(redo)" : "",
+ record.xlogid, record.xrecoff,
+ LogwrtResult.Write.xlogid, LogwrtResult.Write.xrecoff,
+ LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff);
fflush(stderr);
}
@@ -1240,7 +1236,7 @@ XLogFlush(XLogRecPtr record)
if (XLByteLT(LogwrtResult.Flush, record))
elog(STOP, "XLogFlush: request %X/%X is not satisfied --- flushed only to %X/%X",
record.xlogid, record.xrecoff,
- LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff);
+ LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff);
}
LWLockRelease(WALWriteLock);
}
@@ -1565,8 +1561,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
{
/*
* Before deleting the file, see if it can be recycled as
- * a future log segment. We allow recycling segments up to
- * XLOGfiles + XLOGfileslop segments beyond the current
+ * a future log segment. We allow recycling segments up
+ * to XLOGfiles + XLOGfileslop segments beyond the current
* XLOG location.
*/
if (InstallXLogFileSegment(endlogId, endlogSeg, path,
@@ -1719,7 +1715,6 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
if (readBuf == NULL)
{
-
/*
* First time through, permanently allocate readBuf. We do it
* this way, rather than just making a static array, for two
@@ -1767,7 +1762,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
readFile = XLogFileOpen(readId, readSeg, (emode == LOG));
if (readFile < 0)
goto next_record_is_invalid;
- readOff = (uint32) (-1);/* force read to occur below */
+ readOff = (uint32) (-1); /* force read to occur below */
}
targetPageOff = ((RecPtr->xrecoff % XLogSegSize) / BLCKSZ) * BLCKSZ;
@@ -2022,7 +2017,6 @@ WriteControlFile(void)
#ifdef USE_LOCALE
char *localeptr;
-
#endif
/*
@@ -2054,10 +2048,10 @@ WriteControlFile(void)
"\n\tsuch queries, you may wish to set LC_COLLATE to \"C\" and"
"\n\tre-initdb. For more information see the Administrator's Guide.",
ControlFile->lc_collate);
-#else /* not USE_LOCALE */
+#else /* not USE_LOCALE */
strcpy(ControlFile->lc_collate, "C");
strcpy(ControlFile->lc_ctype, "C");
-#endif /* not USE_LOCALE */
+#endif /* not USE_LOCALE */
/* Contents are protected with a CRC */
INIT_CRC64(ControlFile->crc);
@@ -2156,7 +2150,7 @@ ReadControlFile(void)
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
elog(STOP,
"The database cluster was initialized with CATALOG_VERSION_NO %d,\n"
- "\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
+ "\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
"\tIt looks like you need to initdb.",
ControlFile->catalog_version_no, CATALOG_VERSION_NO);
if (ControlFile->blcksz != BLCKSZ)
@@ -2174,7 +2168,7 @@ ReadControlFile(void)
#ifdef USE_LOCALE
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
elog(STOP,
- "The database cluster was initialized with LC_COLLATE '%s',\n"
+ "The database cluster was initialized with LC_COLLATE '%s',\n"
"\twhich is not recognized by setlocale().\n"
"\tIt looks like you need to initdb.",
ControlFile->lc_collate);
@@ -2184,15 +2178,15 @@ ReadControlFile(void)
"\twhich is not recognized by setlocale().\n"
"\tIt looks like you need to initdb.",
ControlFile->lc_ctype);
-#else /* not USE_LOCALE */
+#else /* not USE_LOCALE */
if (strcmp(ControlFile->lc_collate, "C") != 0 ||
strcmp(ControlFile->lc_ctype, "C") != 0)
elog(STOP,
- "The database cluster was initialized with LC_COLLATE '%s' and\n"
+ "The database cluster was initialized with LC_COLLATE '%s' and\n"
"\tLC_CTYPE '%s', but the server was compiled without locale support.\n"
"\tIt looks like you need to initdb or recompile.",
ControlFile->lc_collate, ControlFile->lc_ctype);
-#endif /* not USE_LOCALE */
+#endif /* not USE_LOCALE */
}
void
@@ -2536,7 +2530,7 @@ StartupXLOG(void)
{
/* nextXid must be beyond record's xid */
if (TransactionIdFollowsOrEquals(record->xl_xid,
- ShmemVariableCache->nextXid))
+ ShmemVariableCache->nextXid))
{
ShmemVariableCache->nextXid = record->xl_xid;
TransactionIdAdvance(ShmemVariableCache->nextXid);
@@ -2585,8 +2579,8 @@ StartupXLOG(void)
Insert->PrevRecord = LastRec;
/*
- * If the next record will go to the new page
- * then initialize for that one.
+ * If the next record will go to the new page then initialize for that
+ * one.
*/
if ((BLCKSZ - EndOfLog.xrecoff % BLCKSZ) < SizeOfXLogRecord)
EndOfLog.xrecoff += (BLCKSZ - EndOfLog.xrecoff % BLCKSZ);
@@ -2602,9 +2596,7 @@ StartupXLOG(void)
NewPageEndPtr.xrecoff = BLCKSZ;
}
else
- {
NewPageEndPtr.xrecoff += BLCKSZ;
- }
XLogCtl->xlblocks[0] = NewPageEndPtr;
Insert->currpage->xlp_magic = XLOG_PAGE_MAGIC;
if (InRecovery)
@@ -2621,9 +2613,10 @@ StartupXLOG(void)
XLogCtl->xlblocks[0].xlogid = openLogId;
XLogCtl->xlblocks[0].xrecoff =
((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
+
/*
* Tricky point here: readBuf contains the *last* block that the
- * LastRec record spans, not the one it starts in. The last block
+ * LastRec record spans, not the one it starts in. The last block
* is indeed the one we want to use.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
@@ -2670,7 +2663,6 @@ StartupXLOG(void)
if (InRecovery)
{
-
/*
* In case we had to use the secondary checkpoint, make sure that
* it will still be shown as the secondary checkpoint after this
@@ -2748,8 +2740,8 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (record->xl_rmid != RM_XLOG_ID)
{
elog(LOG, (whichChkpt == 1 ?
- "invalid resource manager id in primary checkpoint record" :
- "invalid resource manager id in secondary checkpoint record"));
+ "invalid resource manager id in primary checkpoint record" :
+ "invalid resource manager id in secondary checkpoint record"));
return NULL;
}
if (record->xl_info != XLOG_CHECKPOINT_SHUTDOWN &&
@@ -2845,11 +2837,11 @@ CreateCheckPoint(bool shutdown)
/*
* The CheckpointLock can be held for quite a while, which is not good
- * because we won't respond to a cancel/die request while waiting for an
- * LWLock. (But the alternative of using a regular lock won't work for
- * background checkpoint processes, which are not regular backends.)
- * So, rather than use a plain LWLockAcquire, use this kluge to allow
- * an interrupt to be accepted while we are waiting:
+ * because we won't respond to a cancel/die request while waiting for
+ * an LWLock. (But the alternative of using a regular lock won't work
+ * for background checkpoint processes, which are not regular
+ * backends.) So, rather than use a plain LWLockAcquire, use this
+ * kluge to allow an interrupt to be accepted while we are waiting:
*/
while (!LWLockConditionalAcquire(CheckpointLock, LW_EXCLUSIVE))
{
@@ -2996,7 +2988,8 @@ CreateCheckPoint(bool shutdown)
* but watch out for case that undo = 0.
*
* Without UNDO support: just use the redo pointer. This allows xlog
- * space to be freed much faster when there are long-running transactions.
+ * space to be freed much faster when there are long-running
+ * transactions.
*/
#ifdef NOT_USED
if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
@@ -3230,7 +3223,6 @@ assign_xlog_sync_method(const char *method)
if (sync_method != new_sync_method || open_sync_bit != new_sync_bit)
{
-
/*
* To ensure that no blocks escape unsynced, force an fsync on the
* currently open log segment (if any). Also, if the open flag is
@@ -3264,7 +3256,7 @@ issue_xlog_fsync(void)
{
switch (sync_method)
{
- case SYNC_METHOD_FSYNC:
+ case SYNC_METHOD_FSYNC:
if (pg_fsync(openLogFile) != 0)
elog(STOP, "fsync of log file %u, segment %u failed: %m",
openLogId, openLogSeg);
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 2394060a67f..e49d543ac8b 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.20 2001/10/05 17:28:11 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.21 2001/10/25 05:49:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -247,7 +247,7 @@ _xl_remove_hash_entry(XLogRelDesc *rdesc)
rdesc->moreRecently->lessRecently = rdesc->lessRecently;
hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
- (void *) &(rdesc->reldata.rd_node), HASH_REMOVE, NULL);
+ (void *) &(rdesc->reldata.rd_node), HASH_REMOVE, NULL);
if (hentry == NULL)
elog(STOP, "_xl_remove_hash_entry: file was not found in cache");
@@ -304,9 +304,7 @@ XLogCloseRelationCache(void)
hash_seq_init(&status, _xlrelcache);
while ((hentry = (XLogRelCacheEntry *) hash_seq_search(&status)) != NULL)
- {
_xl_remove_hash_entry(hentry->rdesc);
- }
hash_destroy(_xlrelcache);