Diffstat (limited to 'src/backend/utils/cache')
-rw-r--r--  src/backend/utils/cache/inval.c      | 16
-rw-r--r--  src/backend/utils/cache/plancache.c  |  5
-rw-r--r--  src/backend/utils/cache/relcache.c   | 17
3 files changed, 18 insertions(+), 20 deletions(-)
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index d22cc5a93b3..9c79775725b 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -182,7 +182,7 @@ static int numSharedInvalidMessagesArray;
static int maxSharedInvalidMessagesArray;
/* GUC storage */
-int debug_invalidate_system_caches_always = 0;
+int debug_discard_caches = 0;
/*
* Dynamically-registered callback functions. Current implementation
@@ -690,7 +690,7 @@ AcceptInvalidationMessages(void)
ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
InvalidateSystemCaches);
- /*
+ /*----------
* Test code to force cache flushes anytime a flush could happen.
*
* This helps detect intermittent faults caused by code that reads a cache
@@ -698,28 +698,28 @@ AcceptInvalidationMessages(void)
* rarely actually does so. This can spot issues that would otherwise
* only arise with badly timed concurrent DDL, for example.
*
- * The default debug_invalidate_system_caches_always = 0 does no forced
- * cache flushes.
+ * The default debug_discard_caches = 0 does no forced cache flushes.
*
* If used with CLOBBER_FREED_MEMORY,
- * debug_invalidate_system_caches_always = 1 (CLOBBER_CACHE_ALWAYS)
+ * debug_discard_caches = 1 (formerly known as CLOBBER_CACHE_ALWAYS)
* provides a fairly thorough test that the system contains no cache-flush
* hazards. However, it also makes the system unbelievably slow --- the
* regression tests take about 100 times longer than normal.
*
* If you're a glutton for punishment, try
- * debug_invalidate_system_caches_always = 3 (CLOBBER_CACHE_RECURSIVELY).
+ * debug_discard_caches = 3 (formerly known as CLOBBER_CACHE_RECURSIVELY).
* This slows things by at least a factor of 10000, so I wouldn't suggest
* trying to run the entire regression tests that way. It's useful to try
* a few simple tests, to make sure that cache reload isn't subject to
* internal cache-flush hazards, but after you've done a few thousand
* recursive reloads it's unlikely you'll learn more.
+ *----------
*/
-#ifdef CLOBBER_CACHE_ENABLED
+#ifdef DISCARD_CACHES_ENABLED
{
static int recursion_depth = 0;
- if (recursion_depth < debug_invalidate_system_caches_always)
+ if (recursion_depth < debug_discard_caches)
{
recursion_depth++;
InvalidateSystemCaches();
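
The hunk above contains the whole mechanism: the GUC value doubles as the permitted nesting depth, so a recursion counter keeps debug_discard_caches = 3 exercising recursive cache reloads without running away. Below is a minimal standalone sketch of that same pattern against a toy one-entry cache; every name here is hypothetical, and none of this is PostgreSQL code.

    #include <stdio.h>

    static int debug_discard = 3;   /* 0 = no forced flushes; higher = deeper nesting */
    static int cached = -1;         /* -1 means "no cached value" */

    static int rebuild(void)        /* stand-in for an expensive cache load */
    {
        return 42;
    }

    static int lookup(void)
    {
        static int recursion_depth = 0;

        /*
         * Force a flush at every opportunity, so a caller that keeps a
         * stale pointer across a possible invalidation fails reproducibly
         * instead of intermittently.  The depth counter bounds how far the
         * forced flushes can nest, mimicking the recursion_depth test above.
         */
        if (recursion_depth < debug_discard)
        {
            recursion_depth++;
            cached = -1;            /* discard the cache ... */
            (void) lookup();        /* ... and a reload can hit this path again */
            recursion_depth--;
        }

        if (cached < 0)
            cached = rebuild();
        return cached;
    }

    int main(void)
    {
        printf("%d\n", lookup());   /* prints 42 after three nested discards */
        return 0;
    }

With debug_discard = 3, the forced discard nests three levels deep and then bottoms out, which is exactly why the real thing is usable (if very slow) rather than non-terminating.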
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 07b01451327..6767eae8f20 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -897,9 +897,8 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
* rejected a generic plan, it's possible to reach here with is_valid
* false due to an invalidation while making the generic plan. In theory
* the invalidation must be a false positive, perhaps a consequence of an
- * sinval reset event or the debug_invalidate_system_caches_always code.
- * But for safety, let's treat it as real and redo the
- * RevalidateCachedQuery call.
+ * sinval reset event or the debug_discard_caches code. But for safety,
+ * let's treat it as real and redo the RevalidateCachedQuery call.
*/
if (!plansource->is_valid)
qlist = RevalidateCachedQuery(plansource, queryEnv);
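
The comment being reworded here describes a simple defensive pattern: after building the plan, re-check the validity flag, and if an invalidation slipped in mid-build, redo the revalidation step rather than trying to prove the invalidation harmless. A hedged toy sketch of that control flow follows; the types and functions are hypothetical stand-ins, not the plancache API.

    #include <stdbool.h>

    typedef struct PlanSrc
    {
        bool        is_valid;
        /* ... parse tree, plan, dependency list ... */
    } PlanSrc;

    static void revalidate(PlanSrc *src)
    {
        /* re-check dependencies; re-analyze the query if anything changed */
        src->is_valid = true;
    }

    static void build_plan(PlanSrc *src)
    {
        /* planning may process invalidation messages that clear is_valid */
        (void) src;
    }

    static void get_plan(PlanSrc *src)
    {
        revalidate(src);
        build_plan(src);

        /*
         * An invalidation during the build may well be a false positive
         * (an sinval reset, or a forced cache discard), but it is cheaper
         * and safer to treat it as real and revalidate again than to
         * reason about which invalidations can be ignored.
         */
        if (!src->is_valid)
            revalidate(src);
    }

    int main(void)
    {
        PlanSrc     src = {false};

        get_plan(&src);
        return src.is_valid ? 0 : 1;
    }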
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 5dac9f06960..13d9994af3e 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -97,7 +97,7 @@
#define MAYBE_RECOVER_RELATION_BUILD_MEMORY 1
#else
#define RECOVER_RELATION_BUILD_MEMORY 0
-#ifdef CLOBBER_CACHE_ENABLED
+#ifdef DISCARD_CACHES_ENABLED
#define MAYBE_RECOVER_RELATION_BUILD_MEMORY 1
#endif
#endif
@@ -1011,10 +1011,10 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
* data, reasoning that the caller's context is at worst of transaction
* scope, and relcache loads shouldn't happen so often that it's essential
* to recover transient data before end of statement/transaction. However
- * that's definitely not true in clobber-cache test builds, and perhaps
- * it's not true in other cases.
+ * that's definitely not true when debug_discard_caches is active, and
+ * perhaps it's not true in other cases.
*
- * When cache clobbering is enabled or when forced to by
+ * When debug_discard_caches is active or when forced to by
* RECOVER_RELATION_BUILD_MEMORY=1, arrange to allocate the junk in a
* temporary context that we'll free before returning. Make it a child of
* caller's context so that it will get cleaned up appropriately if we
@@ -1024,7 +1024,7 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
MemoryContext tmpcxt = NULL;
MemoryContext oldcxt = NULL;
- if (RECOVER_RELATION_BUILD_MEMORY || debug_invalidate_system_caches_always > 0)
+ if (RECOVER_RELATION_BUILD_MEMORY || debug_discard_caches > 0)
{
tmpcxt = AllocSetContextCreate(CurrentMemoryContext,
"RelationBuildDesc workspace",
@@ -1627,11 +1627,10 @@ LookupOpclassInfo(Oid operatorClassOid,
* otherwise. However it can be helpful for detecting bugs in the cache
* loading logic itself, such as reliance on a non-nailed index. Given
* the limited use-case and the fact that this adds a great deal of
- * expense, we enable it only for high values of
- * debug_invalidate_system_caches_always.
+ * expense, we enable it only for high values of debug_discard_caches.
*/
-#ifdef CLOBBER_CACHE_ENABLED
- if (debug_invalidate_system_caches_always > 2)
+#ifdef DISCARD_CACHES_ENABLED
+ if (debug_discard_caches > 2)
opcentry->valid = false;
#endif
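
Taken together, the settings form a ladder: higher values of debug_discard_caches switch on progressively more expensive checks, and the very costly ones, like this opclass-cache clobbering, only unlock near the top. A trivial standalone illustration of that threshold-gating idiom, with hypothetical names:

    #include <stdio.h>

    static int debug_discard = 3;

    int main(void)
    {
        if (debug_discard > 0)
            puts("forced cache discards enabled");
        if (debug_discard > 2)          /* gluttons for punishment only */
            puts("opclass-style extra clobbering enabled too");
        return 0;
    }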