summaryrefslogtreecommitdiff
path: root/src/backend/access/gin
diff options
context:
space:
mode:
author: Bruce Momjian 2014-05-06 16:12:18 +0000
committer: Bruce Momjian 2014-05-06 16:12:18 +0000
commit0a7832005792fa6dad171f9cadb8d587fe0dd800 (patch)
tree365cfc42c521a52607e41394b08ef44d338d8fc1 /src/backend/access/gin
parentfb85cd4320414c3f6e9c8bc69ec944200ae1e493 (diff)
pgindent run for 9.4
This includes removing tabs after periods in C comments, which was applied to back branches, so this change should not affect backpatching.
Diffstat (limited to 'src/backend/access/gin')
-rw-r--r--src/backend/access/gin/ginarrayproc.c5
-rw-r--r--src/backend/access/gin/ginbtree.c27
-rw-r--r--src/backend/access/gin/ginbulk.c2
-rw-r--r--src/backend/access/gin/gindatapage.c96
-rw-r--r--src/backend/access/gin/ginentrypage.c7
-rw-r--r--src/backend/access/gin/ginfast.c8
-rw-r--r--src/backend/access/gin/ginget.c83
-rw-r--r--src/backend/access/gin/gininsert.c2
-rw-r--r--src/backend/access/gin/ginlogic.c39
-rw-r--r--src/backend/access/gin/ginpostinglist.c16
-rw-r--r--src/backend/access/gin/ginscan.c2
-rw-r--r--src/backend/access/gin/ginutil.c7
-rw-r--r--src/backend/access/gin/ginvacuum.c21
-rw-r--r--src/backend/access/gin/ginxlog.c20
14 files changed, 175 insertions, 160 deletions
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index 32dbed68c77..66cea28113a 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -197,7 +197,7 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare and
+ * against nulls here. This is because array_contain_compare and
* array_eq handle nulls differently ...
*/
res = true;
@@ -279,9 +279,10 @@ ginarraytriconsistent(PG_FUNCTION_ARGS)
res = GIN_MAYBE;
break;
case GinEqualStrategy:
+
/*
* Must have all elements in check[] true; no discrimination
- * against nulls here. This is because array_contain_compare and
+ * against nulls here. This is because array_contain_compare and
* array_eq handle nulls differently ...
*/
res = GIN_MAYBE;
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 9b0f82fc904..27f88e0eb21 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -251,6 +251,7 @@ ginFindParents(GinBtree btree, GinBtreeStack *stack)
Assert(blkno != btree->rootBlkno);
ptr->blkno = blkno;
ptr->buffer = buffer;
+
/*
* parent may be wrong, but if so, the ginFinishSplit call will
* recurse to call ginFindParents again to fix it.
@@ -328,7 +329,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
GinPlaceToPageRC rc;
uint16 xlflags = 0;
Page childpage = NULL;
- Page newlpage = NULL, newrpage = NULL;
+ Page newlpage = NULL,
+ newrpage = NULL;
if (GinPageIsData(page))
xlflags |= GIN_INSERT_ISDATA;
@@ -346,8 +348,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
}
/*
- * Try to put the incoming tuple on the page. placeToPage will decide
- * if the page needs to be split.
+ * Try to put the incoming tuple on the page. placeToPage will decide if
+ * the page needs to be split.
*/
rc = btree->placeToPage(btree, stack->buffer, stack,
insertdata, updateblkno,
@@ -371,7 +373,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
XLogRecPtr recptr;
XLogRecData rdata[3];
ginxlogInsert xlrec;
- BlockIdData childblknos[2];
+ BlockIdData childblknos[2];
xlrec.node = btree->index->rd_node;
xlrec.blkno = BufferGetBlockNumber(stack->buffer);
@@ -449,7 +451,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
data.flags = xlflags;
if (childbuf != InvalidBuffer)
{
- Page childpage = BufferGetPage(childbuf);
+ Page childpage = BufferGetPage(childbuf);
+
GinPageGetOpaque(childpage)->flags &= ~GIN_INCOMPLETE_SPLIT;
data.leftChildBlkno = BufferGetBlockNumber(childbuf);
@@ -505,8 +508,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
/*
* Construct a new root page containing downlinks to the new left
- * and right pages. (do this in a temporary copy first rather
- * than overwriting the original page directly, so that we can still
+ * and right pages. (do this in a temporary copy first rather than
+ * overwriting the original page directly, so that we can still
* abort gracefully if this fails.)
*/
newrootpg = PageGetTempPage(newrpage);
@@ -604,7 +607,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
else
{
elog(ERROR, "unknown return code from GIN placeToPage method: %d", rc);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
}
@@ -627,8 +630,8 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,
bool first = true;
/*
- * freestack == false when we encounter an incompletely split page during a
- * scan, while freestack == true is used in the normal scenario that a
+ * freestack == false when we encounter an incompletely split page during
+ * a scan, while freestack == true is used in the normal scenario that a
* split is finished right after the initial insert.
*/
if (!freestack)
@@ -650,8 +653,8 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,
* then continue with the current one.
*
* Note: we have to finish *all* incomplete splits we encounter, even
- * if we have to move right. Otherwise we might choose as the target
- * a page that has no downlink in the parent, and splitting it further
+ * if we have to move right. Otherwise we might choose as the target a
+ * page that has no downlink in the parent, and splitting it further
* would fail.
*/
if (GinPageIsIncompleteSplit(BufferGetPage(parent->buffer)))
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index 9f3009b5894..3af027187ac 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -187,7 +187,7 @@ ginInsertBAEntry(BuildAccumulator *accum,
* Since the entries are being inserted into a balanced binary tree, you
* might think that the order of insertion wouldn't be critical, but it turns
* out that inserting the entries in sorted order results in a lot of
- * rebalancing operations and is slow. To prevent this, we attempt to insert
+ * rebalancing operations and is slow. To prevent this, we attempt to insert
* the nodes in an order that will produce a nearly-balanced tree if the input
* is in fact sorted.
*
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index c11ed858833..272a9ca7c09 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -49,8 +49,8 @@ typedef struct
dlist_head segments; /* a list of leafSegmentInfos */
/*
- * The following fields represent how the segments are split across
- * pages, if a page split is required. Filled in by leafRepackItems.
+ * The following fields represent how the segments are split across pages,
+ * if a page split is required. Filled in by leafRepackItems.
*/
dlist_node *lastleft; /* last segment on left page */
int lsize; /* total size on left page */
@@ -61,7 +61,7 @@ typedef struct
typedef struct
{
- dlist_node node; /* linked list pointers */
+ dlist_node node; /* linked list pointers */
/*-------------
* 'action' indicates the status of this in-memory segment, compared to
@@ -83,9 +83,9 @@ typedef struct
int nmodifieditems;
/*
- * The following fields represent the items in this segment. If 'items'
- * is not NULL, it contains a palloc'd array of the itemsin this segment.
- * If 'seg' is not NULL, it contains the items in an already-compressed
+ * The following fields represent the items in this segment. If 'items' is
+ * not NULL, it contains a palloc'd array of the itemsin this segment. If
+ * 'seg' is not NULL, it contains the items in an already-compressed
* format. It can point to an on-disk page (!modified), or a palloc'd
* segment in memory. If both are set, they must represent the same items.
*/
@@ -386,7 +386,7 @@ GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset)
if (offset != maxoff + 1)
memmove(ptr + sizeof(PostingItem),
ptr,
- (maxoff - offset + 1) * sizeof(PostingItem));
+ (maxoff - offset + 1) *sizeof(PostingItem));
}
memcpy(ptr, data, sizeof(PostingItem));
@@ -436,8 +436,8 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
int maxitems = items->nitem - items->curitem;
Page page = BufferGetPage(buf);
int i;
- ItemPointerData rbound;
- ItemPointerData lbound;
+ ItemPointerData rbound;
+ ItemPointerData lbound;
bool needsplit;
bool append;
int segsize;
@@ -451,7 +451,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
Assert(GinPageIsData(page));
- rbound = *GinDataPageGetRightBound(page);
+ rbound = *GinDataPageGetRightBound(page);
/*
* Count how many of the new items belong to this page.
@@ -464,8 +464,8 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
{
/*
* This needs to go to some other location in the tree. (The
- * caller should've chosen the insert location so that at least
- * the first item goes here.)
+ * caller should've chosen the insert location so that at
+ * least the first item goes here.)
*/
Assert(i > 0);
break;
@@ -553,7 +553,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
/* Add the new items to the segments */
if (!addItemsToLeaf(leaf, newItems, maxitems))
{
- /* all items were duplicates, we have nothing to do */
+ /* all items were duplicates, we have nothing to do */
items->curitem += maxitems;
MemoryContextSwitchTo(oldCxt);
@@ -680,7 +680,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack,
Assert(GinPageRightMost(page) ||
ginCompareItemPointers(GinDataPageGetRightBound(*newlpage),
- GinDataPageGetRightBound(*newrpage)) < 0);
+ GinDataPageGetRightBound(*newrpage)) < 0);
if (append)
elog(DEBUG2, "appended %d items to block %u; split %d/%d (%d to go)",
@@ -769,16 +769,16 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs)
* We don't try to re-encode the segments here, even though some of them
* might be really small now that we've removed some items from them. It
* seems like a waste of effort, as there isn't really any benefit from
- * larger segments per se; larger segments only help to pack more items
- * in the same space. We might as well delay doing that until the next
+ * larger segments per se; larger segments only help to pack more items in
+ * the same space. We might as well delay doing that until the next
* insertion, which will need to re-encode at least part of the page
* anyway.
*
- * Also note if the page was in uncompressed, pre-9.4 format before, it
- * is now represented as one huge segment that contains all the items.
- * It might make sense to split that, to speed up random access, but we
- * don't bother. You'll have to REINDEX anyway if you want the full gain
- * of the new tighter index format.
+ * Also note if the page was in uncompressed, pre-9.4 format before, it is
+ * now represented as one huge segment that contains all the items. It
+ * might make sense to split that, to speed up random access, but we don't
+ * bother. You'll have to REINDEX anyway if you want the full gain of the
+ * new tighter index format.
*/
if (removedsomething)
{
@@ -795,6 +795,7 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs)
{
leafSegmentInfo *seginfo = dlist_container(leafSegmentInfo, node,
iter.cur);
+
if (seginfo->action != GIN_SEGMENT_UNMODIFIED)
modified = true;
if (modified && seginfo->action != GIN_SEGMENT_DELETE)
@@ -862,10 +863,11 @@ constructLeafRecompressWALData(Buffer buf, disassembledLeaf *leaf)
}
walbufbegin = palloc(
- sizeof(ginxlogRecompressDataLeaf) +
- BLCKSZ + /* max size needed to hold the segment data */
- nmodified * 2 + /* (segno + action) per action */
- sizeof(XLogRecData));
+ sizeof(ginxlogRecompressDataLeaf) +
+ BLCKSZ + /* max size needed to hold the segment
+ * data */
+ nmodified * 2 + /* (segno + action) per action */
+ sizeof(XLogRecData));
walbufend = walbufbegin;
recompress_xlog = (ginxlogRecompressDataLeaf *) walbufend;
@@ -965,9 +967,9 @@ dataPlaceToPageLeafRecompress(Buffer buf, disassembledLeaf *leaf)
int segsize;
/*
- * If the page was in pre-9.4 format before, convert the header, and
- * force all segments to be copied to the page whether they were modified
- * or not.
+ * If the page was in pre-9.4 format before, convert the header, and force
+ * all segments to be copied to the page whether they were modified or
+ * not.
*/
if (!GinPageIsCompressed(page))
{
@@ -1022,6 +1024,7 @@ dataPlaceToPageLeafSplit(Buffer buf, disassembledLeaf *leaf,
dlist_node *node;
dlist_node *firstright;
leafSegmentInfo *seginfo;
+
/* these must be static so they can be returned to caller */
static ginxlogSplitDataLeaf split_xlog;
static XLogRecData rdata[3];
@@ -1121,6 +1124,7 @@ dataPlaceToPageInternal(GinBtree btree, Buffer buf, GinBtreeStack *stack,
Page page = BufferGetPage(buf);
OffsetNumber off = stack->off;
PostingItem *pitem;
+
/* these must be static so they can be returned to caller */
static XLogRecData rdata;
static ginxlogInsertDataInternal data;
@@ -1198,7 +1202,7 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf,
int nrightitems;
Size pageSize = PageGetPageSize(oldpage);
ItemPointerData oldbound = *GinDataPageGetRightBound(oldpage);
- ItemPointer bound;
+ ItemPointer bound;
Page lpage;
Page rpage;
OffsetNumber separator;
@@ -1216,8 +1220,8 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf,
*prdata = rdata;
/*
- * First construct a new list of PostingItems, which includes all the
- * old items, and the new item.
+ * First construct a new list of PostingItems, which includes all the old
+ * items, and the new item.
*/
memcpy(allitems, GinDataPageGetPostingItem(oldpage, FirstOffsetNumber),
(off - 1) * sizeof(PostingItem));
@@ -1402,8 +1406,8 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems)
leafSegmentInfo *newseg;
/*
- * If the page is completely empty, just construct one new segment to
- * hold all the new items.
+ * If the page is completely empty, just construct one new segment to hold
+ * all the new items.
*/
if (dlist_is_empty(&leaf->segments))
{
@@ -1418,9 +1422,9 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems)
dlist_foreach(iter, &leaf->segments)
{
- leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur);
+ leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur);
int nthis;
- ItemPointer tmpitems;
+ ItemPointer tmpitems;
int ntmpitems;
/*
@@ -1434,7 +1438,7 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems)
ItemPointerData next_first;
next = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node,
- dlist_next_node(&leaf->segments, iter.cur));
+ dlist_next_node(&leaf->segments, iter.cur));
if (next->items)
next_first = next->items[0];
else
@@ -1556,27 +1560,27 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
if (seginfo->seg == NULL)
{
if (seginfo->nitems > GinPostingListSegmentMaxSize)
- npacked = 0; /* no chance that it would fit. */
+ npacked = 0; /* no chance that it would fit. */
else
{
seginfo->seg = ginCompressPostingList(seginfo->items,
seginfo->nitems,
- GinPostingListSegmentMaxSize,
+ GinPostingListSegmentMaxSize,
&npacked);
}
if (npacked != seginfo->nitems)
{
/*
- * Too large. Compress again to the target size, and create
- * a new segment to represent the remaining items. The new
- * segment is inserted after this one, so it will be
- * processed in the next iteration of this loop.
+ * Too large. Compress again to the target size, and
+ * create a new segment to represent the remaining items.
+ * The new segment is inserted after this one, so it will
+ * be processed in the next iteration of this loop.
*/
if (seginfo->seg)
pfree(seginfo->seg);
seginfo->seg = ginCompressPostingList(seginfo->items,
seginfo->nitems,
- GinPostingListSegmentTargetSize,
+ GinPostingListSegmentTargetSize,
&npacked);
if (seginfo->action != GIN_SEGMENT_INSERT)
seginfo->action = GIN_SEGMENT_REPLACE;
@@ -1596,7 +1600,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining)
*/
if (SizeOfGinPostingList(seginfo->seg) < GinPostingListSegmentMinSize && next_node)
{
- int nmerged;
+ int nmerged;
nextseg = dlist_container(leafSegmentInfo, node, next_node);
@@ -1741,8 +1745,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
GinPageGetOpaque(tmppage)->rightlink = InvalidBlockNumber;
/*
- * Write as many of the items to the root page as fit. In segments
- * of max GinPostingListSegmentMaxSize bytes each.
+ * Write as many of the items to the root page as fit. In segments of max
+ * GinPostingListSegmentMaxSize bytes each.
*/
nrootitems = 0;
rootsize = 0;
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 4291bab63be..412f90da4db 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -135,7 +135,8 @@ GinFormTuple(GinState *ginstate,
*/
if (data)
{
- char *ptr = GinGetPosting(itup);
+ char *ptr = GinGetPosting(itup);
+
memcpy(ptr, data, dataSize);
}
@@ -162,7 +163,7 @@ ginReadTuple(GinState *ginstate, OffsetNumber attnum, IndexTuple itup,
{
Pointer ptr = GinGetPosting(itup);
int nipd = GinGetNPosting(itup);
- ItemPointer ipd;
+ ItemPointer ipd;
int ndecoded;
if (GinItupIsCompressed(itup))
@@ -192,7 +193,7 @@ ginReadTuple(GinState *ginstate, OffsetNumber attnum, IndexTuple itup,
* Form a non-leaf entry tuple by copying the key data from the given tuple,
* which can be either a leaf or non-leaf entry tuple.
*
- * Any posting list in the source tuple is not copied. The specified child
+ * Any posting list in the source tuple is not copied. The specified child
* block number is inserted into t_tid.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index a16c2140c22..09c3e39bf3b 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -440,7 +440,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
* Create temporary index tuples for a single indexable item (one index column
* for the heap tuple specified by ht_ctid), and append them to the array
* in *collector. They will subsequently be written out using
- * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
+ * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
* temp tuples for a given heap tuple must be written in one call to
* ginHeapTupleFastInsert.
*/
@@ -707,7 +707,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka,
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
- * either. The reason it's okay is that multiple insertion of the same entry
+ * either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
@@ -761,7 +761,7 @@ ginInsertCleanup(GinState *ginstate,
LockBuffer(metabuffer, GIN_UNLOCK);
/*
- * Initialize. All temporary space will be in opCtx
+ * Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
@@ -855,7 +855,7 @@ ginInsertCleanup(GinState *ginstate,
/*
* While we left the page unlocked, more stuff might have gotten
- * added to it. If so, process those entries immediately. There
+ * added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
* guarantees that inserted row(s) will not continue on next page.
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index fda19cf4e69..271f09901b9 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -85,7 +85,8 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
page = BufferGetPage(buffer);
if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0)
{
- int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap);
+ int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap);
+
scanEntry->predictNumberResult += n;
}
@@ -100,7 +101,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
/*
* Collects TIDs into scanEntry->matchBitmap for all heap tuples that
- * match the search entry. This supports three different match modes:
+ * match the search entry. This supports three different match modes:
*
* 1. Partial-match support: scan from current point until the
* comparePartialFn says we're done.
@@ -196,7 +197,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
/*
* In ALL mode, we are not interested in null items, so we can
* stop if we get to a null-item placeholder (which will be the
- * last entry for a given attnum). We do want to include NULL_KEY
+ * last entry for a given attnum). We do want to include NULL_KEY
* and EMPTY_ITEM entries, though.
*/
if (icategory == GIN_CAT_NULL_ITEM)
@@ -407,7 +408,7 @@ restartScanEntry:
else if (GinGetNPosting(itup) > 0)
{
entry->list = ginReadTuple(ginstate, entry->attnum, itup,
- &entry->nlist);
+ &entry->nlist);
entry->predictNumberResult = entry->nlist;
entry->isFinished = FALSE;
@@ -463,11 +464,11 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key)
* considerably, if the frequent term can be put in the additional set.
*
* There can be many legal ways to divide them entries into these two
- * sets. A conservative division is to just put everything in the
- * required set, but the more you can put in the additional set, the more
- * you can skip during the scan. To maximize skipping, we try to put as
- * many frequent items as possible into additional, and less frequent
- * ones into required. To do that, sort the entries by frequency
+ * sets. A conservative division is to just put everything in the required
+ * set, but the more you can put in the additional set, the more you can
+ * skip during the scan. To maximize skipping, we try to put as many
+ * frequent items as possible into additional, and less frequent ones into
+ * required. To do that, sort the entries by frequency
* (predictNumberResult), and put entries into the required set in that
* order, until the consistent function says that none of the remaining
* entries can form a match, without any items from the required set. The
@@ -635,8 +636,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan
if (stepright)
{
/*
- * We've processed all the entries on this page. If it was the last
- * page in the tree, we're done.
+ * We've processed all the entries on this page. If it was the
+ * last page in the tree, we're done.
*/
if (GinPageRightMost(page))
{
@@ -647,8 +648,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan
}
/*
- * Step to next page, following the right link. then find the first
- * ItemPointer greater than advancePast.
+ * Step to next page, following the right link. then find the
+ * first ItemPointer greater than advancePast.
*/
entry->buffer = ginStepRight(entry->buffer,
ginstate->index,
@@ -658,7 +659,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan
stepright = true;
if (GinPageGetOpaque(page)->flags & GIN_DELETED)
- continue; /* page was deleted by concurrent vacuum */
+ continue; /* page was deleted by concurrent vacuum */
/*
* The first item > advancePast might not be on this page, but
@@ -781,6 +782,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
gotitem = true;
break;
}
+
/*
* Not a lossy page. Skip over any offsets <= advancePast, and
* return that.
@@ -788,8 +790,9 @@ entryGetItem(GinState *ginstate, GinScanEntry entry,
if (entry->matchResult->blockno == advancePastBlk)
{
/*
- * First, do a quick check against the last offset on the page.
- * If that's > advancePast, so are all the other offsets.
+ * First, do a quick check against the last offset on the
+ * page. If that's > advancePast, so are all the other
+ * offsets.
*/
if (entry->matchResult->offsets[entry->matchResult->ntuples - 1] <= advancePastOff)
{
@@ -890,8 +893,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
/*
* We might have already tested this item; if so, no need to repeat work.
- * (Note: the ">" case can happen, if advancePast is exact but we previously
- * had to set curItem to a lossy-page pointer.)
+ * (Note: the ">" case can happen, if advancePast is exact but we
+ * previously had to set curItem to a lossy-page pointer.)
*/
if (ginCompareItemPointers(&key->curItem, &advancePast) > 0)
return;
@@ -942,8 +945,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
/*
* Ok, we now know that there are no matches < minItem.
*
- * If minItem is lossy, it means that there were no exact items on
- * the page among requiredEntries, because lossy pointers sort after exact
+ * If minItem is lossy, it means that there were no exact items on the
+ * page among requiredEntries, because lossy pointers sort after exact
* items. However, there might be exact items for the same page among
* additionalEntries, so we mustn't advance past them.
*/
@@ -1085,6 +1088,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
if (entry->isFinished)
key->entryRes[i] = GIN_FALSE;
#if 0
+
/*
* This case can't currently happen, because we loaded all the entries
* for this item earlier.
@@ -1119,6 +1123,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
break;
default:
+
/*
* the 'default' case shouldn't happen, but if the consistent
* function returns something bogus, this is the safe result
@@ -1129,11 +1134,10 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key,
}
/*
- * We have a tuple, and we know if it matches or not. If it's a
- * non-match, we could continue to find the next matching tuple, but
- * let's break out and give scanGetItem a chance to advance the other
- * keys. They might be able to skip past to a much higher TID, allowing
- * us to save work.
+ * We have a tuple, and we know if it matches or not. If it's a non-match,
+ * we could continue to find the next matching tuple, but let's break out
+ * and give scanGetItem a chance to advance the other keys. They might be
+ * able to skip past to a much higher TID, allowing us to save work.
*/
/* clean up after consistentFn calls */
@@ -1165,14 +1169,14 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
* matching item.
*
* This logic works only if a keyGetItem stream can never contain both
- * exact and lossy pointers for the same page. Else we could have a
+ * exact and lossy pointers for the same page. Else we could have a
* case like
*
* stream 1 stream 2
- * ... ...
+ * ... ...
* 42/6 42/7
* 50/1 42/0xffff
- * ... ...
+ * ... ...
*
* We would conclude that 42/6 is not a match and advance stream 1,
* thus never detecting the match to the lossy pointer in stream 2.
@@ -1205,12 +1209,11 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
}
/*
- * It's a match. We can conclude that nothing < matches, so
- * the other key streams can skip to this item.
+ * It's a match. We can conclude that nothing < matches, so the
+ * other key streams can skip to this item.
*
- * Beware of lossy pointers, though; from a lossy pointer, we
- * can only conclude that nothing smaller than this *block*
- * matches.
+ * Beware of lossy pointers, though; from a lossy pointer, we can
+ * only conclude that nothing smaller than this *block* matches.
*/
if (ItemPointerIsLossyPage(&key->curItem))
{
@@ -1229,8 +1232,8 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
}
/*
- * If this is the first key, remember this location as a
- * potential match, and proceed to check the rest of the keys.
+ * If this is the first key, remember this location as a potential
+ * match, and proceed to check the rest of the keys.
*
* Otherwise, check if this is the same item that we checked the
* previous keys for (or a lossy pointer for the same page). If
@@ -1247,7 +1250,7 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
if (ItemPointerIsLossyPage(&key->curItem) ||
ItemPointerIsLossyPage(item))
{
- Assert (GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item));
+ Assert(GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item));
match = (GinItemPointerGetBlockNumber(&key->curItem) ==
GinItemPointerGetBlockNumber(item));
}
@@ -1264,8 +1267,8 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast,
/*
* Now *item contains the first ItemPointer after previous result that
- * satisfied all the keys for that exact TID, or a lossy reference
- * to the same page.
+ * satisfied all the keys for that exact TID, or a lossy reference to the
+ * same page.
*
* We must return recheck = true if any of the keys are marked recheck.
*/
@@ -1776,10 +1779,10 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* First, scan the pending list and collect any matching entries into the
- * bitmap. After we scan a pending item, some other backend could post it
+ * bitmap. After we scan a pending item, some other backend could post it
* into the main index, and so we might visit it a second time during the
* main scan. This is okay because we'll just re-set the same bit in the
- * bitmap. (The possibility of duplicate visits is a major reason why GIN
+ * bitmap. (The possibility of duplicate visits is a major reason why GIN
* can't support the amgettuple API, however.) Note that it would not do
* to scan the main index before the pending list, since concurrent
* cleanup could then make us miss entries entirely.
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index 3bafb6471b3..b27cae3aab2 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -40,7 +40,7 @@ typedef struct
* Adds array of item pointers to tuple's posting list, or
* creates posting tree and tuple pointing to tree in case
* of not enough space. Max size of tuple is defined in
- * GinFormTuple(). Returns a new, modified index tuple.
+ * GinFormTuple(). Returns a new, modified index tuple.
* items[] must be in sorted order with no duplicates.
*/
static IndexTuple
diff --git a/src/backend/access/gin/ginlogic.c b/src/backend/access/gin/ginlogic.c
index 167d25ea5c7..052abd2bd8e 100644
--- a/src/backend/access/gin/ginlogic.c
+++ b/src/backend/access/gin/ginlogic.c
@@ -47,7 +47,7 @@
* Maximum number of MAYBE inputs that shimTriConsistentFn will try to
* resolve by calling all combinations.
*/
-#define MAX_MAYBE_ENTRIES 4
+#define MAX_MAYBE_ENTRIES 4
/*
* Dummy consistent functions for an EVERYTHING key. Just claim it matches.
@@ -95,14 +95,14 @@ static GinTernaryValue
directTriConsistentFn(GinScanKey key)
{
return DatumGetGinTernaryValue(FunctionCall7Coll(
- key->triConsistentFmgrInfo,
- key->collation,
- PointerGetDatum(key->entryRes),
- UInt16GetDatum(key->strategy),
- key->query,
- UInt32GetDatum(key->nuserentries),
- PointerGetDatum(key->extra_data),
- PointerGetDatum(key->queryValues),
+ key->triConsistentFmgrInfo,
+ key->collation,
+ PointerGetDatum(key->entryRes),
+ UInt16GetDatum(key->strategy),
+ key->query,
+ UInt32GetDatum(key->nuserentries),
+ PointerGetDatum(key->extra_data),
+ PointerGetDatum(key->queryValues),
PointerGetDatum(key->queryCategories)));
}
@@ -115,15 +115,16 @@ static bool
shimBoolConsistentFn(GinScanKey key)
{
GinTernaryValue result;
+
result = DatumGetGinTernaryValue(FunctionCall7Coll(
- key->triConsistentFmgrInfo,
- key->collation,
- PointerGetDatum(key->entryRes),
- UInt16GetDatum(key->strategy),
- key->query,
- UInt32GetDatum(key->nuserentries),
- PointerGetDatum(key->extra_data),
- PointerGetDatum(key->queryValues),
+ key->triConsistentFmgrInfo,
+ key->collation,
+ PointerGetDatum(key->entryRes),
+ UInt16GetDatum(key->strategy),
+ key->query,
+ UInt32GetDatum(key->nuserentries),
+ PointerGetDatum(key->extra_data),
+ PointerGetDatum(key->queryValues),
PointerGetDatum(key->queryCategories)));
if (result == GIN_MAYBE)
{
@@ -240,8 +241,8 @@ ginInitConsistentFunction(GinState *ginstate, GinScanKey key)
key->boolConsistentFn = shimBoolConsistentFn;
if (OidIsValid(ginstate->triConsistentFn[key->attnum - 1].fn_oid))
- key->triConsistentFn = directTriConsistentFn;
+ key->triConsistentFn = directTriConsistentFn;
else
- key->triConsistentFn = shimTriConsistentFn;
+ key->triConsistentFn = shimTriConsistentFn;
}
}
diff --git a/src/backend/access/gin/ginpostinglist.c b/src/backend/access/gin/ginpostinglist.c
index 81bbb09c244..606a824f125 100644
--- a/src/backend/access/gin/ginpostinglist.c
+++ b/src/backend/access/gin/ginpostinglist.c
@@ -126,9 +126,9 @@ encode_varbyte(uint64 val, unsigned char **ptr)
static uint64
decode_varbyte(unsigned char **ptr)
{
- uint64 val;
+ uint64 val;
unsigned char *p = *ptr;
- uint64 c;
+ uint64 c;
c = *(p++);
val = c & 0x7F;
@@ -210,7 +210,7 @@ ginCompressPostingList(const ItemPointer ipd, int nipd, int maxsize,
uint64 val = itemptr_to_uint64(&ipd[totalpacked]);
uint64 delta = val - prev;
- Assert (val > prev);
+ Assert(val > prev);
if (endptr - ptr >= 6)
encode_varbyte(delta, &ptr);
@@ -225,7 +225,7 @@ ginCompressPostingList(const ItemPointer ipd, int nipd, int maxsize,
encode_varbyte(delta, &p);
if (p - buf > (endptr - ptr))
- break; /* output is full */
+ break; /* output is full */
memcpy(ptr, buf, p - buf);
ptr += (p - buf);
@@ -286,7 +286,7 @@ ginPostingListDecode(GinPostingList *plist, int *ndecoded)
ItemPointer
ginPostingListDecodeAllSegments(GinPostingList *segment, int len, int *ndecoded_out)
{
- ItemPointer result;
+ ItemPointer result;
int nallocated;
uint64 val;
char *endseg = ((char *) segment) + len;
@@ -349,7 +349,7 @@ ginPostingListDecodeAllSegmentsToTbm(GinPostingList *ptr, int len,
TIDBitmap *tbm)
{
int ndecoded;
- ItemPointer items;
+ ItemPointer items;
items = ginPostingListDecodeAllSegments(ptr, len, &ndecoded);
tbm_add_tuples(tbm, items, ndecoded, false);
@@ -374,8 +374,8 @@ ginMergeItemPointers(ItemPointerData *a, uint32 na,
dst = (ItemPointer) palloc((na + nb) * sizeof(ItemPointerData));
/*
- * If the argument arrays don't overlap, we can just append them to
- * each other.
+ * If the argument arrays don't overlap, we can just append them to each
+ * other.
*/
if (na == 0 || nb == 0 || ginCompareItemPointers(&a[na - 1], &b[0]) < 0)
{
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index b19386e19ad..66c62b2e32a 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -389,7 +389,7 @@ ginNewScanKey(IndexScanDesc scan)
/*
* If the index is version 0, it may be missing null and placeholder
* entries, which would render searches for nulls and full-index scans
- * unreliable. Throw an error if so.
+ * unreliable. Throw an error if so.
*/
if (hasNullQuery && !so->isVoidRes)
{
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 4dadb50dcaa..3ca0b68434b 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -67,6 +67,7 @@ initGinState(GinState *state, Relation index)
fmgr_info_copy(&(state->extractQueryFn[i]),
index_getprocinfo(index, i + 1, GIN_EXTRACTQUERY_PROC),
CurrentMemoryContext);
+
/*
* Check opclass capability to do tri-state or binary logic consistent
* check.
@@ -74,14 +75,14 @@ initGinState(GinState *state, Relation index)
if (index_getprocid(index, i + 1, GIN_TRICONSISTENT_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->triConsistentFn[i]),
- index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC),
+ index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC),
CurrentMemoryContext);
}
if (index_getprocid(index, i + 1, GIN_CONSISTENT_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->consistentFn[i]),
- index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC),
+ index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC),
CurrentMemoryContext);
}
@@ -458,7 +459,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
* If there's more than one key, sort and unique-ify.
*
* XXX Using qsort here is notationally painful, and the overhead is
- * pretty bad too. For small numbers of keys it'd likely be better to use
+ * pretty bad too. For small numbers of keys it'd likely be better to use
* a simple insertion sort.
*/
if (*nentries > 1)
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 72f734caf8d..af4d2714b5f 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -47,7 +47,7 @@ ginVacuumItemPointers(GinVacuumState *gvs, ItemPointerData *items,
{
int i,
remaining = 0;
- ItemPointer tmpitems = NULL;
+ ItemPointer tmpitems = NULL;
/*
* Iterate over TIDs array
@@ -208,8 +208,8 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
}
/*
- * if we have root and there are empty pages in tree, then we don't release
- * lock to go further processing and guarantee that tree is unused
+ * if we have root and there are empty pages in tree, then we don't
+ * release lock to go further processing and guarantee that tree is unused
*/
if (!(isRoot && hasVoidPage))
{
@@ -236,7 +236,7 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
Buffer pBuffer;
Page page,
parentPage;
- BlockNumber rightlink;
+ BlockNumber rightlink;
/*
* Lock the pages in the same order as an insertion would, to avoid
@@ -302,11 +302,11 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
data.rightLink = GinPageGetOpaque(page)->rightlink;
/*
- * We can't pass buffer_std = TRUE, because we didn't set pd_lower
- * on pre-9.4 versions. The page might've been binary-upgraded from
- * an older version, and hence not have pd_lower set correctly.
- * Ditto for the left page, but removing the item from the parent
- * updated its pd_lower, so we know that's OK at this point.
+ * We can't pass buffer_std = TRUE, because we didn't set pd_lower on
+ * pre-9.4 versions. The page might've been binary-upgraded from an
+ * older version, and hence not have pd_lower set correctly. Ditto for
+ * the left page, but removing the item from the parent updated its
+ * pd_lower, so we know that's OK at this point.
*/
rdata[0].buffer = dBuffer;
rdata[0].buffer_std = FALSE;
@@ -538,7 +538,8 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3
}
/*
- * if we already created a temporary page, make changes in place
+ * if we already created a temporary page, make changes in
+ * place
*/
if (tmppage == origpage)
{
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index d19389330c5..a8a917a9d0e 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -133,7 +133,7 @@ ginRedoInsertEntry(Buffer buffer, bool isLeaf, BlockNumber rightblkno, void *rda
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), offset, false, false) == InvalidOffsetNumber)
{
RelFileNode node;
- ForkNumber forknum;
+ ForkNumber forknum;
BlockNumber blknum;
BufferGetTag(buffer, &node, &forknum, &blknum);
@@ -341,8 +341,8 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
payload = XLogRecGetData(record) + sizeof(ginxlogInsert);
/*
- * First clear incomplete-split flag on child page if this finishes
- * a split.
+ * First clear incomplete-split flag on child page if this finishes a
+ * split.
*/
if (!isLeaf)
{
@@ -472,8 +472,8 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record)
payload = XLogRecGetData(record) + sizeof(ginxlogSplit);
/*
- * First clear incomplete-split flag on child page if this finishes
- * a split
+ * First clear incomplete-split flag on child page if this finishes a
+ * split
*/
if (!isLeaf)
{
@@ -522,7 +522,7 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record)
if (isRoot)
{
- BlockNumber rootBlkno = data->rrlink;
+ BlockNumber rootBlkno = data->rrlink;
Buffer rootBuf = XLogReadBuffer(data->node, rootBlkno, true);
Page rootPage = BufferGetPage(rootBuf);
@@ -711,9 +711,9 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
Buffer buffer;
/*
- * Restore the metapage. This is essentially the same as a full-page image,
- * so restore the metapage unconditionally without looking at the LSN, to
- * avoid torn page hazards.
+ * Restore the metapage. This is essentially the same as a full-page
+ * image, so restore the metapage unconditionally without looking at the
+ * LSN, to avoid torn page hazards.
*/
metabuffer = XLogReadBuffer(data->node, GIN_METAPAGE_BLKNO, false);
if (!BufferIsValid(metabuffer))
@@ -877,7 +877,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record)
/*
* In normal operation, shiftList() takes exclusive lock on all the
- * pages-to-be-deleted simultaneously. During replay, however, it should
+ * pages-to-be-deleted simultaneously. During replay, however, it should
* be all right to lock them one at a time. This is dependent on the fact
* that we are deleting pages from the head of the list, and that readers
* share-lock the next page before releasing the one they are on. So we