Diffstat (limited to 'src/backend/access/hash')
-rw-r--r--  src/backend/access/hash/hash.c        |  16
-rw-r--r--  src/backend/access/hash/hashinsert.c  |  15
-rw-r--r--  src/backend/access/hash/hashovfl.c    |  44
-rw-r--r--  src/backend/access/hash/hashpage.c    | 114
-rw-r--r--  src/backend/access/hash/hashscan.c    |   8
-rw-r--r--  src/backend/access/hash/hashsearch.c  |  22
-rw-r--r--  src/backend/access/hash/hashutil.c    |   3
7 files changed, 115 insertions, 107 deletions
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index db00490e582..40b05720fb2 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.72 2004/08/29 04:12:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.73 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -210,8 +210,8 @@ hashgettuple(PG_FUNCTION_ARGS)
bool res;
/*
- * We hold pin but not lock on current buffer while outside the hash AM.
- * Reacquire the read lock here.
+ * We hold pin but not lock on current buffer while outside the hash
+ * AM. Reacquire the read lock here.
*/
if (BufferIsValid(so->hashso_curbuf))
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
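[Editor's note: a minimal sketch of the pin-versus-lock rule the comment above relies on, assuming HASH_READ/HASH_WRITE map onto the shared/exclusive buffer lock modes; illustrative only, not part of the patch. _hash_chgbufaccess boils down to switching content locks while the pin is held throughout:

    /* Sketch: change lock mode on a pinned buffer.  HASH_NOLOCK on
     * either side means no lock is held/taken on that side. */
    if (from_access == HASH_WRITE)
        _hash_wrtnorelbuf(rel, buf);        /* mark dirty before unlocking */
    if (from_access != HASH_NOLOCK)
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    if (to_access != HASH_NOLOCK)
        LockBuffer(buf, to_access);
]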
@@ -470,7 +470,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
/*
* Read the metapage to fetch original bucket and tuple counts. Also,
* we keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a
+ * hashm_spares[] values to compute bucket page addresses. This is a
* bit hokey but perfectly safe, since the interesting entries in the
* spares array cannot change under us; and it beats rereading the
* metapage for each bucket.
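[Editor's note: why a stale spares copy suffices: bucket page addresses depend only on spares entries at or below the bucket's own split point, and those are frozen once written. A sketch following the shape of the BUCKET_TO_BLKNO arithmetic, for illustration:

    /* Sketch: bucket B sits at block B, shifted past the metapage
     * (block 0) plus every overflow/bitmap page allocated before B's
     * split point, which hashm_spares[] counts. */
    BlockNumber
    bucket_to_blkno(HashMetaPage metap, Bucket b)
    {
        uint32 prior = (b == 0) ? 0 : metap->hashm_spares[_hash_log2(b + 1) - 1];

        return (BlockNumber) (b + prior) + 1;
    }
]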
@@ -532,7 +532,7 @@ loop_top:
ItemPointer htup;
hitem = (HashItem) PageGetItem(page,
- PageGetItemId(page, offno));
+ PageGetItemId(page, offno));
htup = &(hitem->hash_itup.t_tid);
if (callback(htup, callback_state))
{
@@ -595,8 +595,8 @@ loop_top:
orig_ntuples == metap->hashm_ntuples)
{
/*
- * No one has split or inserted anything since start of scan,
- * so believe our count as gospel.
+ * No one has split or inserted anything since start of scan, so
+ * believe our count as gospel.
*/
metap->hashm_ntuples = num_index_tuples;
}
@@ -604,7 +604,7 @@ loop_top:
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by
+ * double-scanned tuples in split buckets. Proceed by
* dead-reckoning.
*/
if (metap->hashm_ntuples > tuples_removed)
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index b1c303f8d07..91ae559e3a6 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.33 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.34 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -20,7 +20,7 @@
static OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf,
- Size itemsize, HashItem hitem);
+ Size itemsize, HashItem hitem);
/*
@@ -81,7 +81,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
/*
* Check whether the item can fit on a hash page at all. (Eventually,
- * we ought to try to apply TOAST methods if not.) Note that at this
+ * we ought to try to apply TOAST methods if not.) Note that at this
* point, itemsz doesn't include the ItemId.
*/
if (itemsz > HashMaxItemSize((Page) metap))
@@ -105,7 +105,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
/*
- * Acquire share lock on target bucket; then we can release split lock.
+ * Acquire share lock on target bucket; then we can release split
+ * lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
@@ -124,7 +125,7 @@ _hash_doinsert(Relation rel, HashItem hitem)
/*
* no space on this page; check for an overflow page
*/
- BlockNumber nextblkno = pageopaque->hasho_nextblkno;
+ BlockNumber nextblkno = pageopaque->hasho_nextblkno;
if (BlockNumberIsValid(nextblkno))
{
@@ -169,8 +170,8 @@ _hash_doinsert(Relation rel, HashItem hitem)
_hash_droplock(rel, blkno, HASH_SHARE);
/*
- * Write-lock the metapage so we can increment the tuple count.
- * After incrementing it, check to see if it's time for a split.
+ * Write-lock the metapage so we can increment the tuple count. After
+ * incrementing it, check to see if it's time for a split.
*/
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
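[Editor's note: what follows under that write lock, sketched; the threshold test is an assumption about the fill-factor check, stated here for illustration:

    /* Sketch: bump the count, decide about a split, then drop the
     * lock before actually splitting (splits take their own locks). */
    metap->hashm_ntuples += 1;
    do_expand = metap->hashm_ntuples >
        (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1);
    _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
    if (do_expand)
        _hash_expandtable(rel, metabuf);
]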
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 740f119bc7a..c02da93dc1e 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.43 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.44 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -41,11 +41,11 @@ bitno_to_blkno(HashMetaPage metap, uint32 ovflbitnum)
for (i = 1;
i < splitnum && ovflbitnum > metap->hashm_spares[i];
i++)
- /* loop */ ;
+ /* loop */ ;
/*
- * Convert to absolute page number by adding the number of bucket pages
- * that exist before this split point.
+ * Convert to absolute page number by adding the number of bucket
+ * pages that exist before this split point.
*/
return (BlockNumber) ((1 << i) + ovflbitnum);
}
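[Editor's note: a worked instance of that conversion, with assumed values: take splitnum = 3 and hashm_spares[] = {0, 1, 1}. For ovflbitnum = 4 the loop passes spares[1] = 1 and spares[2] = 1 and stops at i = 3, so the overflow page lands at block (1 << 3) + 4 = 12, i.e. just past the 8 bucket pages that precede split point 3.]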
@@ -79,7 +79,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
- * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
+ * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
@@ -88,12 +88,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno)
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
- * no one else tries to compact the bucket meanwhile. This guarantees that
+ * no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
- * immediate successor of the originally passed 'buf'. Additional overflow
+ * immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
@@ -197,7 +197,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
/* outer loop iterates once per bitmap page */
for (;;)
{
- BlockNumber mapblkno;
+ BlockNumber mapblkno;
Page mappage;
uint32 last_inpage;
@@ -274,9 +274,9 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't
- * risk changing it if someone moved it while we were searching
- * bitmap pages.
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * changing it if someone moved it while we were searching bitmap
+ * pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
metap->hashm_firstfree = bit + 1;
@@ -304,9 +304,9 @@ found:
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't
- * risk changing it if someone moved it while we were searching
- * bitmap pages.
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * changing it if someone moved it while we were searching bitmap
+ * pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
{
@@ -381,7 +381,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
Bucket bucket;
/* Get information from the doomed page */
- ovflblkno = BufferGetBlockNumber(ovflbuf);
+ ovflblkno = BufferGetBlockNumber(ovflbuf);
ovflpage = BufferGetPage(ovflbuf);
_hash_checkpage(rel, ovflpage, LH_OVERFLOW_PAGE);
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
@@ -396,7 +396,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
/*
* Fix up the bucket chain. this is a doubly-linked list, so we must
* fix up the bucket chain members behind and ahead of the overflow
- * page being deleted. No concurrency issues since we hold exclusive
+ * page being deleted. No concurrency issues since we hold exclusive
* lock on the entire bucket.
*/
if (BlockNumberIsValid(prevblkno))
@@ -488,7 +488,8 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
/*
* It is okay to write-lock the new bitmap page while holding metapage
- * write lock, because no one else could be contending for the new page.
+ * write lock, because no one else could be contending for the new
+ * page.
*
* There is some loss of concurrency in possibly doing I/O for the new
* page while holding the metapage lock, but this path is taken so
@@ -654,8 +655,8 @@ _hash_squeezebucket(Relation rel,
/*
* delete the tuple from the "read" page. PageIndexTupleDelete
- * repacks the ItemId array, so 'roffnum' will be "advanced" to
- * the "next" ItemId.
+ * repacks the ItemId array, so 'roffnum' will be "advanced"
+ * to the "next" ItemId.
*/
PageIndexTupleDelete(rpage, roffnum);
}
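[Editor's note: the invariant that comment depends on, sketched as a standalone loop; tuple_should_go is a hypothetical predicate used only for illustration:

    /* Sketch: deleting repacks the ItemId array, so after a delete
     * the same offset already names the next tuple; advance only on
     * the keep path. */
    OffsetNumber off = FirstOffsetNumber;

    while (off <= PageGetMaxOffsetNumber(page))
    {
        if (tuple_should_go(page, off))
            PageIndexTupleDelete(page, off);
        else
            off = OffsetNumberNext(off);
    }
]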
@@ -667,8 +668,9 @@ _hash_squeezebucket(Relation rel,
* Tricky point here: if our read and write pages are adjacent in the
* bucket chain, our write lock on wbuf will conflict with
* _hash_freeovflpage's attempt to update the sibling links of the
- * removed page. However, in that case we are done anyway, so we can
- * simply drop the write lock before calling _hash_freeovflpage.
+ * removed page. However, in that case we are done anyway, so we
+ * can simply drop the write lock before calling
+ * _hash_freeovflpage.
*/
if (PageIsEmpty(rpage))
{
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 787bb9bf621..d3088f50cef 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.45 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.46 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -35,11 +35,11 @@
static void _hash_splitbucket(Relation rel, Buffer metabuf,
- Bucket obucket, Bucket nbucket,
- BlockNumber start_oblkno,
- BlockNumber start_nblkno,
- uint32 maxbucket,
- uint32 highmask, uint32 lowmask);
+ Bucket obucket, Bucket nbucket,
+ BlockNumber start_oblkno,
+ BlockNumber start_nblkno,
+ uint32 maxbucket,
+ uint32 highmask, uint32 lowmask);
/*
@@ -47,7 +47,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf,
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
@@ -239,13 +239,13 @@ _hash_metapinit(Relation rel)
RelationGetRelationName(rel));
/*
- * Determine the target fill factor (tuples per bucket) for this index.
- * The idea is to make the fill factor correspond to pages about 3/4ths
- * full. We can compute it exactly if the index datatype is fixed-width,
- * but for var-width there's some guessing involved.
+ * Determine the target fill factor (tuples per bucket) for this
+ * index. The idea is to make the fill factor correspond to pages
+ * about 3/4ths full. We can compute it exactly if the index datatype
+ * is fixed-width, but for var-width there's some guessing involved.
*/
data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid,
- RelationGetDescr(rel)->attrs[0]->atttypmod);
+ RelationGetDescr(rel)->attrs[0]->atttypmod);
item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) +
sizeof(ItemIdData); /* include the line pointer */
ffactor = (BLCKSZ * 3 / 4) / item_width;
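[Editor's note: a worked example under stated assumptions (BLCKSZ = 8192, a 4-byte fixed-width key, MAXALIGN of 8): item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(4) + sizeof(ItemIdData) = 8 + 8 + 4 = 20 bytes, so ffactor = (8192 * 3 / 4) / 20 = 307 tuples per bucket.]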
@@ -288,8 +288,9 @@ _hash_metapinit(Relation rel)
metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
/*
- * We initialize the index with two buckets, 0 and 1, occupying physical
- * blocks 1 and 2. The first freespace bitmap page is in block 3.
+ * We initialize the index with two buckets, 0 and 1, occupying
+ * physical blocks 1 and 2. The first freespace bitmap page is in
+ * block 3.
*/
metap->hashm_maxbucket = metap->hashm_lowmask = 1; /* nbuckets - 1 */
metap->hashm_highmask = 3; /* (nbuckets << 1) - 1 */
@@ -297,7 +298,7 @@ _hash_metapinit(Relation rel)
MemSet((char *) metap->hashm_spares, 0, sizeof(metap->hashm_spares));
MemSet((char *) metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
- metap->hashm_spares[1] = 1; /* the first bitmap page is only spare */
+ metap->hashm_spares[1] = 1; /* the first bitmap page is only spare */
metap->hashm_ovflpoint = 1;
metap->hashm_firstfree = 0;
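[Editor's note: those masks drive the hashkey-to-bucket mapping; a sketch in the style of _hash_hashkey2bucket shows how the initial values behave:

    /* Sketch: mask with highmask first; if that names a bucket that
     * doesn't exist yet, fall back to lowmask.  With maxbucket = 1,
     * highmask = 3, lowmask = 1, every key maps to bucket 0 or 1. */
    Bucket
    hashkey_to_bucket(uint32 hashkey, uint32 maxbucket,
                      uint32 highmask, uint32 lowmask)
    {
        Bucket bucket = hashkey & highmask;

        if (bucket > maxbucket)
            bucket &= lowmask;
        return bucket;
    }
]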
@@ -319,8 +320,8 @@ _hash_metapinit(Relation rel)
}
/*
- * Initialize first bitmap page. Can't do this until we
- * create the first two buckets, else smgr will complain.
+ * Initialize first bitmap page. Can't do this until we create the
+ * first two buckets, else smgr will complain.
*/
_hash_initbitmap(rel, metap, 3);
@@ -362,17 +363,18 @@ _hash_expandtable(Relation rel, Buffer metabuf)
uint32 lowmask;
/*
- * Obtain the page-zero lock to assert the right to begin a split
- * (see README).
+ * Obtain the page-zero lock to assert the right to begin a split (see
+ * README).
*
* Note: deadlock should be impossible here. Our own backend could only
- * be holding bucket sharelocks due to stopped indexscans; those will not
- * block other holders of the page-zero lock, who are only interested in
- * acquiring bucket sharelocks themselves. Exclusive bucket locks are
- * only taken here and in hashbulkdelete, and neither of these operations
- * needs any additional locks to complete. (If, due to some flaw in this
- * reasoning, we manage to deadlock anyway, it's okay to error out; the
- * index will be left in a consistent state.)
+ * be holding bucket sharelocks due to stopped indexscans; those will
+ * not block other holders of the page-zero lock, who are only
+ * interested in acquiring bucket sharelocks themselves. Exclusive
+ * bucket locks are only taken here and in hashbulkdelete, and neither
+ * of these operations needs any additional locks to complete. (If,
+ * due to some flaw in this reasoning, we manage to deadlock anyway,
+ * it's okay to error out; the index will be left in a consistent
+ * state.)
*/
_hash_getlock(rel, 0, HASH_EXCLUSIVE);
@@ -383,8 +385,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
_hash_checkpage(rel, (Page) metap, LH_META_PAGE);
/*
- * Check to see if split is still needed; someone else might have already
- * done one while we waited for the lock.
+ * Check to see if split is still needed; someone else might have
+ * already done one while we waited for the lock.
*
* Make sure this stays in sync with _hash_doinsert()
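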
*/
@@ -394,16 +396,16 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Determine which bucket is to be split, and attempt to lock the old
- * bucket. If we can't get the lock, give up.
+ * bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
*
- * Ideally we would lock the new bucket too before proceeding, but if
- * we are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
+ * Ideally we would lock the new bucket too before proceeding, but if we
+ * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
* isn't correct yet. For simplicity we update the metapage first and
- * then lock. This should be okay because no one else should be trying
- * to lock the new bucket yet...
+ * then lock. This should be okay because no one else should be
+ * trying to lock the new bucket yet...
*/
new_bucket = metap->hashm_maxbucket + 1;
old_bucket = (new_bucket & metap->hashm_lowmask);
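[Editor's note: to make that concrete with illustrative numbers: if hashm_maxbucket = 5 and hashm_lowmask = 3, the split creates new_bucket = 6 and picks old_bucket = 6 & 3 = 2; bucket 2 is exactly where keys whose hash now maps to 6 have been living under the narrower mask.]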
@@ -417,7 +419,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
goto fail;
/*
- * Okay to proceed with split. Update the metapage bucket mapping info.
+ * Okay to proceed with split. Update the metapage bucket mapping
+ * info.
*/
metap->hashm_maxbucket = new_bucket;
@@ -431,11 +434,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* If the split point is increasing (hashm_maxbucket's log base 2
* increases), we need to adjust the hashm_spares[] array and
- * hashm_ovflpoint so that future overflow pages will be created beyond
- * this new batch of bucket pages.
+ * hashm_ovflpoint so that future overflow pages will be created
+ * beyond this new batch of bucket pages.
*
- * XXX should initialize new bucket pages to prevent out-of-order
- * page creation? Don't wanna do it right here though.
+ * XXX should initialize new bucket pages to prevent out-of-order page
+ * creation? Don't wanna do it right here though.
*/
spare_ndx = _hash_log2(metap->hashm_maxbucket + 1);
if (spare_ndx > metap->hashm_ovflpoint)
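[Editor's note: for example, with assumed values: when the split raises hashm_maxbucket from 3 to 4, spare_ndx = _hash_log2(5) = 3, which exceeds the previous hashm_ovflpoint of 2; hashm_spares[3] is then seeded from spares[2] and hashm_ovflpoint advances to 3, so later overflow pages are numbered past the whole new batch of bucket pages (buckets 4 through 7).]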
@@ -456,9 +459,10 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
- * split lock, other splits could begin, so these values might be out of
- * date before _hash_splitbucket finishes. That's okay, since all it
- * needs is to tell which of these two buckets to map hashkeys into.
+ * split lock, other splits could begin, so these values might be out
+ * of date before _hash_splitbucket finishes. That's okay, since all
+ * it needs is to tell which of these two buckets to map hashkeys
+ * into.
*/
maxbucket = metap->hashm_maxbucket;
highmask = metap->hashm_highmask;
@@ -539,8 +543,8 @@ _hash_splitbucket(Relation rel,
/*
* It should be okay to simultaneously write-lock pages from each
- * bucket, since no one else can be trying to acquire buffer lock
- * on pages of either bucket.
+ * bucket, since no one else can be trying to acquire buffer lock on
+ * pages of either bucket.
*/
oblkno = start_oblkno;
nblkno = start_nblkno;
@@ -562,9 +566,9 @@ _hash_splitbucket(Relation rel,
nopaque->hasho_filler = HASHO_FILL;
/*
- * Partition the tuples in the old bucket between the old bucket and the
- * new bucket, advancing along the old bucket's overflow bucket chain
- * and adding overflow pages to the new bucket as needed.
+ * Partition the tuples in the old bucket between the old bucket and
+ * the new bucket, advancing along the old bucket's overflow bucket
+ * chain and adding overflow pages to the new bucket as needed.
*/
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
@@ -582,9 +586,10 @@ _hash_splitbucket(Relation rel,
oblkno = oopaque->hasho_nextblkno;
if (!BlockNumberIsValid(oblkno))
break;
+
/*
- * we ran out of tuples on this particular page, but we
- * have more overflow pages; advance to next page.
+ * we ran out of tuples on this particular page, but we have
+ * more overflow pages; advance to next page.
*/
_hash_wrtbuf(rel, obuf);
@@ -600,8 +605,8 @@ _hash_splitbucket(Relation rel,
/*
* Re-hash the tuple to determine which bucket it now belongs in.
*
- * It is annoying to call the hash function while holding locks,
- * but releasing and relocking the page for each tuple is unappealing
+ * It is annoying to call the hash function while holding locks, but
+ * releasing and relocking the page for each tuple is unappealing
* too.
*/
hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
@@ -666,10 +671,11 @@ _hash_splitbucket(Relation rel,
}
/*
- * We're at the end of the old bucket chain, so we're done partitioning
- * the tuples. Before quitting, call _hash_squeezebucket to ensure the
- * tuples remaining in the old bucket (including the overflow pages) are
- * packed as tightly as possible. The new bucket is already tight.
+ * We're at the end of the old bucket chain, so we're done
+ * partitioning the tuples. Before quitting, call _hash_squeezebucket
+ * to ensure the tuples remaining in the old bucket (including the
+ * overflow pages) are packed as tightly as possible. The new bucket
+ * is already tight.
*/
_hash_wrtbuf(rel, obuf);
_hash_wrtbuf(rel, nbuf);
diff --git a/src/backend/access/hash/hashscan.c b/src/backend/access/hash/hashscan.c
index 2fc24dd9e12..16d2a77d49b 100644
--- a/src/backend/access/hash/hashscan.c
+++ b/src/backend/access/hash/hashscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.36 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.37 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,9 +44,9 @@ ReleaseResources_hash(void)
HashScanList next;
/*
- * Note: this should be a no-op during normal query shutdown.
- * However, in an abort situation ExecutorEnd is not called and so
- * there may be open index scans to clean up.
+ * Note: this should be a no-op during normal query shutdown. However,
+ * in an abort situation ExecutorEnd is not called and so there may be
+ * open index scans to clean up.
*/
prev = NULL;
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 76ad5d31849..daaff4adc50 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.36 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.37 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -137,12 +137,13 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
* We do not support hash scans with no index qualification, because
* we would have to read the whole index rather than just one bucket.
* That creates a whole raft of problems, since we haven't got a
- * practical way to lock all the buckets against splits or compactions.
+ * practical way to lock all the buckets against splits or
+ * compactions.
*/
if (scan->numberOfKeys < 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("hash indexes do not support whole-index scans")));
+ errmsg("hash indexes do not support whole-index scans")));
/*
* If the constant in the index qual is NULL, assume it cannot match
@@ -182,7 +183,8 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
_hash_relbuf(rel, metabuf);
/*
- * Acquire share lock on target bucket; then we can release split lock.
+ * Acquire share lock on target bucket; then we can release split
+ * lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
@@ -287,9 +289,8 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
while (offnum > maxoff)
{
/*
- * either this page is empty
- * (maxoff == InvalidOffsetNumber)
- * or we ran off the end.
+ * either this page is empty (maxoff ==
+ * InvalidOffsetNumber) or we ran off the end.
*/
_hash_readnext(rel, &buf, &page, &opaque);
if (BufferIsValid(buf))
@@ -315,15 +316,12 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
while (offnum < FirstOffsetNumber)
{
/*
- * either this page is empty
- * (offnum == InvalidOffsetNumber)
- * or we ran off the end.
+ * either this page is empty (offnum ==
+ * InvalidOffsetNumber) or we ran off the end.
*/
_hash_readprev(rel, &buf, &page, &opaque);
if (BufferIsValid(buf))
- {
maxoff = offnum = PageGetMaxOffsetNumber(page);
- }
else
{
/* end of bucket */
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 3fb04e77d86..bf9999dc92b 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.39 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.40 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -113,6 +113,7 @@ void
_hash_checkpage(Relation rel, Page page, int flags)
{
Assert(page);
+
/*
* When checking the metapage, always verify magic number and version.
*/