path: root/src/backend/commands/vacuumlazy.c
author     Tom Lane    2017-06-21 19:35:54 +0000
committer  Tom Lane    2017-06-21 19:35:54 +0000
commit     382ceffdf7f620d8f2d50e451b4167d291ae2348 (patch)
tree       f558251492f2c6f86e3566f7a82f9d00509122c2 /src/backend/commands/vacuumlazy.c
parent     c7b8998ebbf310a156aa38022555a24d98fdbfb4 (diff)
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they flow past the right margin.

By default, BSD indent lines up statement continuation lines that are within parentheses so that they start just to the right of the preceding left parenthesis. However, traditionally, if that resulted in the continuation line extending to the right of the desired right margin, then indent would push it left just far enough to not overrun the margin, if it could do so without making the continuation line start to the left of the current statement indent. That makes for a weird mix of indentations unless one has been completely rigid about never violating the 80-column limit.

This behavior has been pretty universally panned by Postgres developers. Hence, disable it with indent's new -lpl switch, so that parenthesized lines are always lined up with the preceding left paren.

This patch is much less interesting than the first round of indent changes, but also bulkier, so I thought it best to separate the effects.

Discussion: https://postgr.es/m/[email protected]
Discussion: https://postgr.es/m/[email protected]
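As a sketch of the two layouts (the call below is hypothetical, not taken from this patch), the old push-left behavior would emit something like

    status = check_relation_page_visibility(relation_descriptor,
                                      target_block_number_for_visibility_check);

keeping the continuation line inside the 80-column margin, while with -lpl the continuation always starts just to the right of the preceding left paren, even when that overruns the margin:

    status = check_relation_page_visibility(relation_descriptor,
                                            target_block_number_for_visibility_check);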
Diffstat (limited to 'src/backend/commands/vacuumlazy.c')
-rw-r--r--  src/backend/commands/vacuumlazy.c  24
1 file changed, 12 insertions, 12 deletions
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 7978c062d75..1951aa94fb1 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -167,7 +167,7 @@ static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
 static int vac_cmp_itemptr(const void *left, const void *right);
 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
-                     TransactionId *visibility_cutoff_xid, bool *all_frozen);
+                                     TransactionId *visibility_cutoff_xid, bool *all_frozen);
 
 
 /*
@@ -391,7 +391,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
                              vacrelstats->new_dead_tuples,
                              OldestXmin);
             appendStringInfo(&buf,
-                          _("buffer usage: %d hits, %d misses, %d dirtied\n"),
+                             _("buffer usage: %d hits, %d misses, %d dirtied\n"),
                              VacuumPageHit,
                              VacuumPageMiss,
                              VacuumPageDirty);
@@ -621,7 +621,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
             uint8       vmskipflags;
 
             vmskipflags = visibilitymap_get_status(onerel,
-                                           next_unskippable_block,
+                                                   next_unskippable_block,
                                                    &vmbuffer);
             if (aggressive)
             {
@@ -857,8 +857,8 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
         if (PageIsNew(page))
         {
             ereport(WARNING,
-                (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
-                        relname, blkno)));
+                    (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+                            relname, blkno)));
             PageInit(page, BufferGetPageSize(buf), 0);
             empty_pages++;
         }
@@ -900,7 +900,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
             PageSetAllVisible(page);
             visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
                               vmbuffer, InvalidTransactionId,
-                       VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
+                              VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
             END_CRIT_SECTION();
         }
@@ -1071,7 +1071,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
             {
                 lazy_record_dead_tuple(vacrelstats, &(tuple.t_self));
                 HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
-                                               &vacrelstats->latestRemovedXid);
+                                                       &vacrelstats->latestRemovedXid);
                 tups_vacuumed += 1;
                 has_dead_tuples = true;
             }
@@ -1087,7 +1087,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
              * freezing. Note we already have exclusive buffer lock.
              */
             if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
-                                      MultiXactCutoff, &frozen[nfrozen],
+                                          MultiXactCutoff, &frozen[nfrozen],
                                           &tuple_totally_frozen))
                 frozen[nfrozen++].offset = offnum;
 
@@ -1268,7 +1268,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
     /* now we can compute the new value for pg_class.reltuples */
     vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
                                                          nblocks,
-                                                  vacrelstats->tupcount_pages,
+                                                         vacrelstats->tupcount_pages,
                                                          num_tuples);
 
     /*
@@ -1337,7 +1337,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
      */
     initStringInfo(&buf);
     appendStringInfo(&buf,
-             _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
+                     _("%.0f dead row versions cannot be removed yet, oldest xmin: %u\n"),
                      nkeep, OldestXmin);
     appendStringInfo(&buf, _("There were %.0f unused item pointers.\n"),
                      nunused);
@@ -1664,7 +1664,7 @@ lazy_cleanup_index(Relation indrel,
                     stats->num_index_tuples,
                     stats->num_pages),
              errdetail("%.0f index row versions were removed.\n"
-              "%u index pages have been deleted, %u are currently reusable.\n"
+                       "%u index pages have been deleted, %u are currently reusable.\n"
                        "%s.",
                        stats->tuples_removed,
                        stats->pages_deleted, stats->pages_free,
@@ -1700,7 +1700,7 @@ should_attempt_truncation(LVRelStats *vacrelstats)
     possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
     if (possibly_freeable > 0 &&
         (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
-      possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
+         possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
         old_snapshot_threshold < 0)
         return true;
     else