Discussion: Re: Protect syscache from bloating with negative cache entries
At Thu, 14 Jan 2021 17:32:27 +0900 (JST), Kyotaro Horiguchi <horikyota.ntt@gmail.com> wrote in
> The commit 4656e3d668 (debug_invalidate_system_caches_always)
> conflicted with this patch. Rebased.
At Wed, 27 Jan 2021 10:07:47 +0900 (JST), Kyotaro Horiguchi <horikyota.ntt@gmail.com> wrote in
> (I found a bug in a benchmark-aid function
> (CatalogCacheFlushCatalog2), I repost an updated version soon.)
I noticed that a catcachebench-aid function,
CatalogCacheFlushCatalog2(), allocates the bucket array wrongly in the
current memory context, which leads to a crash.
This is a fixed and rebased version.
--
Kyotaro Horiguchi
NTT Open Source Software Center
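For context, the crux of the fix is that the rebuilt bucket array must be
allocated in the long-lived CacheMemoryContext rather than in whatever
memory context happens to be current when the benchmark helper runs. A
minimal sketch of that allocation pattern, mirroring the hunk in the
attached patch (the surrounding function is elided):

	/* Rebuild the bucket array in CacheMemoryContext so it survives the
	 * reset of the caller's (possibly per-query) memory context. */
	cache->cc_nbuckets = 128;
	pfree(cache->cc_bucket);
	cache->cc_bucket = (dlist_head *)
		MemoryContextAllocZero(CacheMemoryContext,
							   cache->cc_nbuckets * sizeof(dlist_head));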
From 5f318170b9c1e0caa1033862261800f06135e5bd Mon Sep 17 00:00:00 2001
From: Kyotaro Horiguchi <horikyoga.ntt@gmail.com>
Date: Wed, 18 Nov 2020 16:54:31 +0900
Subject: [PATCH v7 1/3] CatCache expiration feature
---
src/backend/access/transam/xact.c | 3 ++
src/backend/utils/cache/catcache.c | 87 +++++++++++++++++++++++++++++-
src/backend/utils/misc/guc.c | 12 +++++
src/include/utils/catcache.h | 19 +++++++
4 files changed, 120 insertions(+), 1 deletion(-)
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index a2068e3fd4..86888d2409 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1086,6 +1086,9 @@ static void
AtStart_Cache(void)
{
AcceptInvalidationMessages();
+
+ if (xactStartTimestamp != 0)
+ SetCatCacheClock(xactStartTimestamp);
}
/*
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index fa2b49c676..644d92dd9a 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -38,6 +38,7 @@
#include "utils/rel.h"
#include "utils/resowner_private.h"
#include "utils/syscache.h"
+#include "utils/timestamp.h"
/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
@@ -60,9 +61,19 @@
#define CACHE_elog(...)
#endif
+/*
+ * GUC variable to define the minimum age of entries that will be considered
+ * to be evicted in seconds. -1 to disable the feature.
+ */
+int catalog_cache_prune_min_age = -1;
+uint64 prune_min_age_us;
+
/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;
+/* Clock for the last accessed time of a catcache entry. */
+uint64 catcacheclock = 0;
+
static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
int nkeys,
Datum v1, Datum v2,
@@ -74,6 +85,7 @@ static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
Index hashIndex,
Datum v1, Datum v2,
Datum v3, Datum v4);
+static bool CatCacheCleanupOldEntries(CatCache *cp);
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
Datum v1, Datum v2, Datum v3, Datum v4);
@@ -99,6 +111,15 @@ static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
Datum *srckeys, Datum *dstkeys);
+/* GUC assign function */
+void
+assign_catalog_cache_prune_min_age(int newval, void *extra)
+{
+ if (newval < 0)
+ prune_min_age_us = UINT64_MAX;
+ else
+ prune_min_age_us = ((uint64) newval) * USECS_PER_SEC;
+}
/*
* internal support functions
@@ -1264,6 +1285,9 @@ SearchCatCacheInternal(CatCache *cache,
*/
dlist_move_head(bucket, &ct->cache_elem);
+ /* Record the last access timestamp */
+ ct->lastaccess = catcacheclock;
+
/*
* If it's a positive entry, bump its refcount and return it. If it's
* negative, we can report failure to the caller.
@@ -1425,6 +1449,61 @@ SearchCatCacheMiss(CatCache *cache,
return &ct->tuple;
}
+/*
+ * CatCacheCleanupOldEntries - Remove infrequently-used entries
+ *
+ * Catcache entries happen to be left unused for a long time for several
+ * reasons. Remove such entries to prevent catcache from bloating. It is based
+ * on an algorithm similar to buffer eviction. Entries that are accessed
+ * several times in a certain period live longer than those that have had less
+ * access in the same duration.
+ */
+static bool
+CatCacheCleanupOldEntries(CatCache *cp)
+{
+ int nremoved = 0;
+ int i;
+ long oldest_ts = catcacheclock;
+ uint64 prune_threshold = catcacheclock - prune_min_age_us;
+
+ /* Scan over the whole hash to find entries to remove */
+ for (i = 0 ; i < cp->cc_nbuckets ; i++)
+ {
+ dlist_mutable_iter iter;
+
+ dlist_foreach_modify(iter, &cp->cc_bucket[i])
+ {
+ CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
+
+ /* Don't remove referenced entries */
+ if (ct->refcount == 0 &&
+ (ct->c_list == NULL || ct->c_list->refcount == 0))
+ {
+ if (ct->lastaccess < prune_threshold)
+ {
+ CatCacheRemoveCTup(cp, ct);
+ nremoved++;
+
+ /* don't let the removed entry update oldest_ts */
+ continue;
+ }
+ }
+
+ /* update the oldest timestamp if the entry remains alive */
+ if (ct->lastaccess < oldest_ts)
+ oldest_ts = ct->lastaccess;
+ }
+ }
+
+ cp->cc_oldest_ts = oldest_ts;
+
+ if (nremoved > 0)
+ elog(DEBUG1, "pruning catalog cache id=%d for %s: removed %d / %d",
+ cp->id, cp->cc_relname, nremoved, cp->cc_ntup + nremoved);
+
+ return nremoved > 0;
+}
+
/*
* ReleaseCatCache
*
@@ -1888,6 +1967,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
ct->dead = false;
ct->negative = negative;
ct->hash_value = hashValue;
+ ct->lastaccess = catcacheclock;
dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
@@ -1899,7 +1979,12 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
* arbitrarily, we enlarge when fill factor > 2.
*/
if (cache->cc_ntup > cache->cc_nbuckets * 2)
- RehashCatCache(cache);
+ {
+ /* try removing old entries before expanding hash */
+ if (catcacheclock - cache->cc_oldest_ts < prune_min_age_us ||
+ !CatCacheCleanupOldEntries(cache))
+ RehashCatCache(cache);
+ }
return ct;
}
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 17579eeaca..255e9fa73d 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -88,6 +88,7 @@
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/bytea.h"
+#include "utils/catcache.h"
#include "utils/float.h"
#include "utils/guc_tables.h"
#include "utils/memutils.h"
@@ -3445,6 +3446,17 @@ static struct config_int ConfigureNamesInt[] =
NULL, NULL, NULL
},
+ {
+ {"catalog_cache_prune_min_age", PGC_USERSET, RESOURCES_MEM,
+ gettext_noop("System catalog cache entries that are living unused more than this seconds are considered
forremoval."),
+ gettext_noop("The value of -1 turns off pruning."),
+ GUC_UNIT_S
+ },
+ &catalog_cache_prune_min_age,
+ -1, -1, INT_MAX,
+ NULL, assign_catalog_cache_prune_min_age, NULL
+ },
+
/* End-of-list marker */
{
{NULL, 0, 0, NULL, NULL}, NULL, 0, 0, 0, NULL, NULL, NULL
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index ddc2762eb3..291e857e38 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -22,6 +22,7 @@
#include "access/htup.h"
#include "access/skey.h"
+#include "datatype/timestamp.h"
#include "lib/ilist.h"
#include "utils/relcache.h"
@@ -61,6 +62,7 @@ typedef struct catcache
slist_node cc_next; /* list link */
ScanKeyData cc_skey[CATCACHE_MAXKEYS]; /* precomputed key info for heap
* scans */
+ uint64 cc_oldest_ts; /* timestamp (us) of the oldest tuple */
/*
* Keep these at the end, so that compiling catcache.c with CATCACHE_STATS
@@ -119,6 +121,7 @@ typedef struct catctup
bool dead; /* dead but not yet removed? */
bool negative; /* negative cache entry? */
HeapTupleData tuple; /* tuple management header */
+ uint64 lastaccess; /* timestamp in us of the last usage */
/*
* The tuple may also be a member of at most one CatCList. (If a single
@@ -189,6 +192,22 @@ typedef struct catcacheheader
/* this extern duplicates utils/memutils.h... */
extern PGDLLIMPORT MemoryContext CacheMemoryContext;
+
+/* for guc.c, not PGDLLPMPORT'ed */
+extern int catalog_cache_prune_min_age;
+
+/* source clock for access timestamp of catcache entries */
+extern uint64 catcacheclock;
+
+/* SetCatCacheClock - set catcache timestamp source clock */
+static inline void
+SetCatCacheClock(TimestampTz ts)
+{
+ catcacheclock = (uint64) ts;
+}
+
+extern void assign_catalog_cache_prune_min_age(int newval, void *extra);
+
extern void CreateCacheMemoryContext(void);
extern CatCache *InitCatCache(int id, Oid reloid, Oid indexoid,
--
2.27.0
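To make the expiration criterion above concrete, the check boils down to a
timestamp comparison in microseconds. The following standalone sketch is
illustrative only (not PostgreSQL code; the helper name is invented for the
example) and shows the same arithmetic the patch uses:

	#include <stdint.h>
	#include <stdio.h>

	#define USECS_PER_SEC INT64_C(1000000)

	/* An entry is a pruning candidate when it has been idle for longer than
	 * catalog_cache_prune_min_age seconds (and is otherwise unreferenced). */
	static int
	entry_is_prunable(int64_t clock_us, int64_t lastaccess_us, int min_age_s)
	{
		int64_t threshold;

		if (min_age_s < 0)			/* -1 disables pruning altogether */
			return 0;
		threshold = clock_us - (int64_t) min_age_s * USECS_PER_SEC;
		return lastaccess_us < threshold;
	}

	int
	main(void)
	{
		int64_t clock = INT64_C(1000) * USECS_PER_SEC;

		/* idle for 700 s against a 600 s limit: candidate for removal */
		printf("%d\n", entry_is_prunable(clock, INT64_C(300) * USECS_PER_SEC, 600));
		/* idle for 100 s against a 600 s limit: kept */
		printf("%d\n", entry_is_prunable(clock, INT64_C(900) * USECS_PER_SEC, 600));
		return 0;
	}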
From 6a3985b4e6952d3c60f328a82971709c59e819ab Mon Sep 17 00:00:00 2001
From: Kyotaro Horiguchi <horikyoga.ntt@gmail.com>
Date: Wed, 18 Nov 2020 16:57:05 +0900
Subject: [PATCH v7 2/3] Remove "dead" flag from catcache tuple
---
src/backend/utils/cache/catcache.c | 43 +++++++++++++-----------------
src/include/utils/catcache.h | 10 -------
2 files changed, 18 insertions(+), 35 deletions(-)
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 644d92dd9a..611b65168d 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -480,6 +480,13 @@ CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
Assert(ct->refcount == 0);
Assert(ct->my_cache == cache);
+ /* delink from linked list if not yet */
+ if (ct->cache_elem.prev)
+ {
+ dlist_delete(&ct->cache_elem);
+ ct->cache_elem.prev = NULL;
+ }
+
if (ct->c_list)
{
/*
@@ -487,14 +494,10 @@ CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
* which will recurse back to me, and the recursive call will do the
* work. Set the "dead" flag to make sure it does recurse.
*/
- ct->dead = true;
CatCacheRemoveCList(cache, ct->c_list);
return; /* nothing left to do */
}
- /* delink from linked list */
- dlist_delete(&ct->cache_elem);
-
/*
* Free keys when we're dealing with a negative entry, normal entries just
* point into tuple, allocated together with the CatCTup.
@@ -534,7 +537,7 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl)
/* if the member is dead and now has no references, remove it */
if (
#ifndef CATCACHE_FORCE_RELEASE
- ct->dead &&
+ ct->cache_elem.prev == NULL &&
#endif
ct->refcount == 0)
CatCacheRemoveCTup(cache, ct);
@@ -609,7 +612,9 @@ CatCacheInvalidate(CatCache *cache, uint32 hashValue)
if (ct->refcount > 0 ||
(ct->c_list && ct->c_list->refcount > 0))
{
- ct->dead = true;
+ dlist_delete(&ct->cache_elem);
+ ct->cache_elem.prev = NULL;
+
/* list, if any, was marked dead above */
Assert(ct->c_list == NULL || ct->c_list->dead);
}
@@ -688,7 +693,8 @@ ResetCatalogCache(CatCache *cache)
if (ct->refcount > 0 ||
(ct->c_list && ct->c_list->refcount > 0))
{
- ct->dead = true;
+ dlist_delete(&ct->cache_elem);
+ ct->cache_elem.prev = NULL;
/* list, if any, was marked dead above */
Assert(ct->c_list == NULL || ct->c_list->dead);
}
@@ -1268,9 +1274,6 @@ SearchCatCacheInternal(CatCache *cache,
{
ct = dlist_container(CatCTup, cache_elem, iter.cur);
- if (ct->dead)
- continue; /* ignore dead entries */
-
if (ct->hash_value != hashValue)
continue; /* quickly skip entry if wrong hash val */
@@ -1522,7 +1525,6 @@ ReleaseCatCache(HeapTuple tuple)
offsetof(CatCTup, tuple));
/* Safety checks to ensure we were handed a cache entry */
- Assert(ct->ct_magic == CT_MAGIC);
Assert(ct->refcount > 0);
ct->refcount--;
@@ -1530,7 +1532,7 @@ ReleaseCatCache(HeapTuple tuple)
if (
#ifndef CATCACHE_FORCE_RELEASE
- ct->dead &&
+ ct->cache_elem.prev == NULL &&
#endif
ct->refcount == 0 &&
(ct->c_list == NULL || ct->c_list->refcount == 0))
@@ -1737,8 +1739,8 @@ SearchCatCacheList(CatCache *cache,
{
ct = dlist_container(CatCTup, cache_elem, iter.cur);
- if (ct->dead || ct->negative)
- continue; /* ignore dead and negative entries */
+ if (ct->negative)
+ continue; /* ignore negative entries */
if (ct->hash_value != hashValue)
continue; /* quickly skip entry if wrong hash val */
@@ -1799,14 +1801,13 @@ SearchCatCacheList(CatCache *cache,
{
foreach(ctlist_item, ctlist)
{
+ Assert (ct->cache_elem.prev != NULL);
+
ct = (CatCTup *) lfirst(ctlist_item);
Assert(ct->c_list == NULL);
Assert(ct->refcount > 0);
ct->refcount--;
if (
-#ifndef CATCACHE_FORCE_RELEASE
- ct->dead &&
-#endif
ct->refcount == 0 &&
(ct->c_list == NULL || ct->c_list->refcount == 0))
CatCacheRemoveCTup(cache, ct);
@@ -1834,9 +1835,6 @@ SearchCatCacheList(CatCache *cache,
/* release the temporary refcount on the member */
Assert(ct->refcount > 0);
ct->refcount--;
- /* mark list dead if any members already dead */
- if (ct->dead)
- cl->dead = true;
}
Assert(i == nmembers);
@@ -1960,11 +1958,9 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
* Finish initializing the CatCTup header, and add it to the cache's
* linked list and counts.
*/
- ct->ct_magic = CT_MAGIC;
ct->my_cache = cache;
ct->c_list = NULL;
ct->refcount = 0; /* for the moment */
- ct->dead = false;
ct->negative = negative;
ct->hash_value = hashValue;
ct->lastaccess = catcacheclock;
@@ -2158,9 +2154,6 @@ PrintCatCacheLeakWarning(HeapTuple tuple)
CatCTup *ct = (CatCTup *) (((char *) tuple) -
offsetof(CatCTup, tuple));
- /* Safety check to ensure we were handed a cache entry */
- Assert(ct->ct_magic == CT_MAGIC);
-
elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
ct->my_cache->cc_relname, ct->my_cache->id,
ItemPointerGetBlockNumber(&(tuple->t_self)),
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index 291e857e38..53b0bf31eb 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -87,9 +87,6 @@ typedef struct catcache
typedef struct catctup
{
- int ct_magic; /* for identifying CatCTup entries */
-#define CT_MAGIC 0x57261502
-
uint32 hash_value; /* hash value for this tuple's keys */
/*
@@ -106,19 +103,12 @@ typedef struct catctup
dlist_node cache_elem; /* list member of per-bucket list */
/*
- * A tuple marked "dead" must not be returned by subsequent searches.
- * However, it won't be physically deleted from the cache until its
- * refcount goes to zero. (If it's a member of a CatCList, the list's
- * refcount must go to zero, too; also, remember to mark the list dead at
- * the same time the tuple is marked.)
- *
* A negative cache entry is an assertion that there is no tuple matching
* a particular key. This is just as useful as a normal entry so far as
* avoiding catalog searches is concerned. Management of positive and
* negative entries is identical.
*/
int refcount; /* number of active references */
- bool dead; /* dead but not yet removed? */
bool negative; /* negative cache entry? */
HeapTupleData tuple; /* tuple management header */
uint64 lastaccess; /* timestamp in us of the last usage */
--
2.27.0
From 386180566a5162daf25e33494c6bdbf8d4c30ac4 Mon Sep 17 00:00:00 2001
From: Kyotaro Horiguchi <horikyoga.ntt@gmail.com>
Date: Wed, 18 Nov 2020 16:56:41 +0900
Subject: [PATCH v7 3/3] catcachebench
---
contrib/catcachebench/Makefile | 17 +
contrib/catcachebench/catcachebench--0.0.sql | 14 +
contrib/catcachebench/catcachebench.c | 330 +++++++++++++++++++
contrib/catcachebench/catcachebench.control | 6 +
src/backend/utils/cache/catcache.c | 35 ++
src/backend/utils/cache/syscache.c | 2 +-
6 files changed, 403 insertions(+), 1 deletion(-)
create mode 100644 contrib/catcachebench/Makefile
create mode 100644 contrib/catcachebench/catcachebench--0.0.sql
create mode 100644 contrib/catcachebench/catcachebench.c
create mode 100644 contrib/catcachebench/catcachebench.control
diff --git a/contrib/catcachebench/Makefile b/contrib/catcachebench/Makefile
new file mode 100644
index 0000000000..0478818b25
--- /dev/null
+++ b/contrib/catcachebench/Makefile
@@ -0,0 +1,17 @@
+MODULE_big = catcachebench
+OBJS = catcachebench.o
+
+EXTENSION = catcachebench
+DATA = catcachebench--0.0.sql
+PGFILEDESC = "catcachebench - benchmark for catcache pruning feature"
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/catcachebench
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/catcachebench/catcachebench--0.0.sql b/contrib/catcachebench/catcachebench--0.0.sql
new file mode 100644
index 0000000000..ea9cd62abb
--- /dev/null
+++ b/contrib/catcachebench/catcachebench--0.0.sql
@@ -0,0 +1,14 @@
+/* contrib/catcachebench/catcachebench--0.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION catcachebench" to load this file. \quit
+
+CREATE FUNCTION catcachebench(IN type int)
+RETURNS double precision
+AS 'MODULE_PATHNAME', 'catcachebench'
+LANGUAGE C STRICT VOLATILE;
+
+CREATE FUNCTION catcachereadstats(OUT catid int, OUT reloid oid, OUT searches bigint, OUT hits bigint, OUT neg_hits bigint)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'catcachereadstats'
+LANGUAGE C STRICT VOLATILE;
diff --git a/contrib/catcachebench/catcachebench.c b/contrib/catcachebench/catcachebench.c
new file mode 100644
index 0000000000..b5a4d794ed
--- /dev/null
+++ b/contrib/catcachebench/catcachebench.c
@@ -0,0 +1,330 @@
+/*
+ * catcachebench: test code for cache pruning feature
+ */
+/* #define CATCACHE_STATS */
+#include "postgres.h"
+#include "catalog/pg_type.h"
+#include "catalog/pg_statistic.h"
+#include "executor/spi.h"
+#include "funcapi.h"
+#include "libpq/pqsignal.h"
+#include "utils/catcache.h"
+#include "utils/syscache.h"
+#include "utils/timestamp.h"
+
+Oid tableoids[10000];
+int ntables = 0;
+int16 attnums[1000];
+int natts = 0;
+
+PG_MODULE_MAGIC;
+
+double catcachebench1(void);
+double catcachebench2(void);
+double catcachebench3(void);
+void collectinfo(void);
+void catcachewarmup(void);
+
+PG_FUNCTION_INFO_V1(catcachebench);
+PG_FUNCTION_INFO_V1(catcachereadstats);
+
+extern void CatalogCacheFlushCatalog2(Oid catId);
+extern int64 catcache_called;
+extern CatCache *SysCache[];
+
+typedef struct catcachestatsstate
+{
+ TupleDesc tupd;
+ int catId;
+} catcachestatsstate;
+
+Datum
+catcachereadstats(PG_FUNCTION_ARGS)
+{
+ catcachestatsstate *state_data = NULL;
+ FuncCallContext *fctx;
+
+ if (SRF_IS_FIRSTCALL())
+ {
+ TupleDesc tupdesc;
+ MemoryContext mctx;
+
+ fctx = SRF_FIRSTCALL_INIT();
+ mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);
+
+ state_data = palloc(sizeof(catcachestatsstate));
+
+ /* Build a tuple descriptor for our result type */
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ state_data->tupd = tupdesc;
+ state_data->catId = 0;
+
+ fctx->user_fctx = state_data;
+
+ MemoryContextSwitchTo(mctx);
+ }
+
+ fctx = SRF_PERCALL_SETUP();
+ state_data = fctx->user_fctx;
+
+ if (state_data->catId < SysCacheSize)
+ {
+ Datum values[5];
+ bool nulls[5];
+ HeapTuple resulttup;
+ Datum result;
+ int catId = state_data->catId++;
+
+ memset(nulls, 0, sizeof(nulls));
+ memset(values, 0, sizeof(values));
+ values[0] = Int16GetDatum(catId);
+ values[1] = ObjectIdGetDatum(SysCache[catId]->cc_reloid);
+#ifdef CATCACHE_STATS
+ values[2] = Int64GetDatum(SysCache[catId]->cc_searches);
+ values[3] = Int64GetDatum(SysCache[catId]->cc_hits);
+ values[4] = Int64GetDatum(SysCache[catId]->cc_neg_hits);
+#endif
+ resulttup = heap_form_tuple(state_data->tupd, values, nulls);
+ result = HeapTupleGetDatum(resulttup);
+
+ SRF_RETURN_NEXT(fctx, result);
+ }
+
+ SRF_RETURN_DONE(fctx);
+}
+
+Datum
+catcachebench(PG_FUNCTION_ARGS)
+{
+ int testtype = PG_GETARG_INT32(0);
+ double ms;
+
+ collectinfo();
+
+ /* flush the catalog -- safe? don't mind. */
+ CatalogCacheFlushCatalog2(StatisticRelationId);
+
+ switch (testtype)
+ {
+ case 0:
+ catcachewarmup(); /* prewarm of syscatalog */
+ PG_RETURN_NULL();
+ case 1:
+ ms = catcachebench1(); break;
+ case 2:
+ ms = catcachebench2(); break;
+ case 3:
+ ms = catcachebench3(); break;
+ default:
+ elog(ERROR, "Invalid test type: %d", testtype);
+ }
+
+ PG_RETURN_DATUM(Float8GetDatum(ms));
+}
+
+/*
+ * fetch all attribute entries of all tables.
+ */
+double
+catcachebench1(void)
+{
+ int t, a;
+ instr_time start,
+ duration;
+
+ PG_SETMASK(&BlockSig);
+ INSTR_TIME_SET_CURRENT(start);
+ for (t = 0 ; t < ntables ; t++)
+ {
+ for (a = 0 ; a < natts ; a++)
+ {
+ HeapTuple tup;
+
+ tup = SearchSysCache3(STATRELATTINH,
+ ObjectIdGetDatum(tableoids[t]),
+ Int16GetDatum(attnums[a]),
+ BoolGetDatum(false));
+ /* should be null, but.. */
+ if (HeapTupleIsValid(tup))
+ ReleaseSysCache(tup);
+ }
+ }
+ INSTR_TIME_SET_CURRENT(duration);
+ INSTR_TIME_SUBTRACT(duration, start);
+ PG_SETMASK(&UnBlockSig);
+
+ return INSTR_TIME_GET_MILLISEC(duration);
+};
+
+/*
+ * fetch all attribute entries of a single table 240000 times.
+ */
+double
+catcachebench2(void)
+{
+ int t, a;
+ instr_time start,
+ duration;
+
+ PG_SETMASK(&BlockSig);
+ INSTR_TIME_SET_CURRENT(start);
+ for (t = 0 ; t < 240000 ; t++)
+ {
+ for (a = 0 ; a < natts ; a++)
+ {
+ HeapTuple tup;
+
+ tup = SearchSysCache3(STATRELATTINH,
+ ObjectIdGetDatum(tableoids[0]),
+ Int16GetDatum(attnums[a]),
+ BoolGetDatum(false));
+ /* should be null, but.. */
+ if (HeapTupleIsValid(tup))
+ ReleaseSysCache(tup);
+ }
+ }
+ INSTR_TIME_SET_CURRENT(duration);
+ INSTR_TIME_SUBTRACT(duration, start);
+ PG_SETMASK(&UnBlockSig);
+
+ return INSTR_TIME_GET_MILLISEC(duration);
+};
+
+/*
+ * fetch all attribute entries of all tables four times, letting expiration
+ * happen in between.
+ */
+double
+catcachebench3(void)
+{
+ const int clock_step = 1000;
+ int i, t, a;
+ instr_time start,
+ duration;
+
+ PG_SETMASK(&BlockSig);
+ INSTR_TIME_SET_CURRENT(start);
+ for (i = 0 ; i < 4 ; i++)
+ {
+ int ct = clock_step;
+
+ for (t = 0 ; t < ntables ; t++)
+ {
+ /*
+ * catcacheclock is normally updated from the transaction start timestamp,
+ * so it needs to be updated by other means for this test to work. Here I
+ * chose to update the clock every 1000 table scans.
+ */
+ if (--ct < 0)
+ {
+ SetCatCacheClock(GetCurrentTimestamp());
+ ct = clock_step;
+ }
+ for (a = 0 ; a < natts ; a++)
+ {
+ HeapTuple tup;
+
+ tup = SearchSysCache3(STATRELATTINH,
+ ObjectIdGetDatum(tableoids[t]),
+ Int16GetDatum(attnums[a]),
+ BoolGetDatum(false));
+ /* should be null, but.. */
+ if (HeapTupleIsValid(tup))
+ ReleaseSysCache(tup);
+ }
+ }
+ }
+ INSTR_TIME_SET_CURRENT(duration);
+ INSTR_TIME_SUBTRACT(duration, start);
+ PG_SETMASK(&UnBlockSig);
+
+ return INSTR_TIME_GET_MILLISEC(duration);
+};
+
+void
+catcachewarmup(void)
+{
+ int t, a;
+
+ /* load up catalog tables */
+ for (t = 0 ; t < ntables ; t++)
+ {
+ for (a = 0 ; a < natts ; a++)
+ {
+ HeapTuple tup;
+
+ tup = SearchSysCache3(STATRELATTINH,
+ ObjectIdGetDatum(tableoids[t]),
+ Int16GetDatum(attnums[a]),
+ BoolGetDatum(false));
+ /* should be null, but.. */
+ if (HeapTupleIsValid(tup))
+ ReleaseSysCache(tup);
+ }
+ }
+}
+
+void
+collectinfo(void)
+{
+ int ret;
+ Datum values[10000];
+ bool nulls[10000];
+ Oid types0[] = {OIDOID};
+ int i;
+
+ ntables = 0;
+ natts = 0;
+
+ SPI_connect();
+ /* collect target tables */
+ ret = SPI_execute("select oid from pg_class where relnamespace = (select oid from pg_namespace where nspname =
\'test\')",
+ true, 0);
+ if (ret != SPI_OK_SELECT)
+ elog(ERROR, "Failed 1");
+ if (SPI_processed == 0)
+ elog(ERROR, "no relation found in schema \"test\"");
+ if (SPI_processed > 10000)
+ elog(ERROR, "too many relation found in schema \"test\"");
+
+ for (i = 0 ; i < SPI_processed ; i++)
+ {
+ heap_deform_tuple(SPI_tuptable->vals[i], SPI_tuptable->tupdesc,
+ values, nulls);
+ if (nulls[0])
+ elog(ERROR, "Failed 2");
+
+ tableoids[ntables++] = DatumGetObjectId(values[0]);
+ }
+ SPI_finish();
+ elog(DEBUG1, "%d tables found", ntables);
+
+ values[0] = ObjectIdGetDatum(tableoids[0]);
+ nulls[0] = false;
+ SPI_connect();
+ ret = SPI_execute_with_args("select attnum from pg_attribute where attrelid = (select oid from pg_class where oid
=$1)",
+ 1, types0, values, NULL, true, 0);
+ if (SPI_processed == 0)
+ elog(ERROR, "no attribute found in table %d", tableoids[0]);
+ if (SPI_processed > 10000)
+ elog(ERROR, "too many relation found in table %d", tableoids[0]);
+
+ /* collect target attributes. assuming all tables have the same attnums */
+ for (i = 0 ; i < SPI_processed ; i++)
+ {
+ int16 attnum;
+
+ heap_deform_tuple(SPI_tuptable->vals[i], SPI_tuptable->tupdesc,
+ values, nulls);
+ if (nulls[0])
+ elog(ERROR, "Failed 3");
+ attnum = DatumGetInt16(values[0]);
+
+ if (attnum > 0)
+ attnums[natts++] = attnum;
+ }
+ SPI_finish();
+ elog(DEBUG1, "%d attributes found", natts);
+}
diff --git a/contrib/catcachebench/catcachebench.control b/contrib/catcachebench/catcachebench.control
new file mode 100644
index 0000000000..3fc9d2e420
--- /dev/null
+++ b/contrib/catcachebench/catcachebench.control
@@ -0,0 +1,6 @@
+# catcachebench
+
+comment = 'benchmark for catcache pruning'
+default_version = '0.0'
+module_pathname = '$libdir/catcachebench'
+relocatable = true
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 611b65168d..f458bada3e 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -767,6 +767,41 @@ CatalogCacheFlushCatalog(Oid catId)
CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}
+
+/* FUNCTION FOR BENCHMARKING */
+void
+CatalogCacheFlushCatalog2(Oid catId)
+{
+ slist_iter iter;
+
+ CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
+
+ slist_foreach(iter, &CacheHdr->ch_caches)
+ {
+ CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
+
+ /* Does this cache store tuples of the target catalog? */
+ if (cache->cc_reloid == catId)
+ {
+ /* Yes, so flush all its contents */
+ ResetCatalogCache(cache);
+
+ /* Tell inval.c to call syscache callbacks for this cache */
+ CallSyscacheCallbacks(cache->id, 0);
+
+ cache->cc_nbuckets = 128;
+ pfree(cache->cc_bucket);
+ cache->cc_bucket =
+ (dlist_head *) MemoryContextAllocZero(CacheMemoryContext,
+ cache->cc_nbuckets * sizeof(dlist_head));
+ elog(LOG, "Catcache reset");
+ }
+ }
+
+ CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
+}
+/* END: FUNCTION FOR BENCHMARKING */
+
/*
* InitCatCache
*
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index e4dc4ee34e..b60416ec63 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -994,7 +994,7 @@ static const struct cachedesc cacheinfo[] = {
}
};
-static CatCache *SysCache[SysCacheSize];
+CatCache *SysCache[SysCacheSize];
static bool CacheInitialized = false;
--
2.27.0
On 27/01/2021 03:13, Kyotaro Horiguchi wrote:
> At Thu, 14 Jan 2021 17:32:27 +0900 (JST), Kyotaro Horiguchi <horikyota.ntt@gmail.com> wrote in
>> The commit 4656e3d668 (debug_invalidate_system_caches_always)
>> conflicted with this patch. Rebased.
>
> At Wed, 27 Jan 2021 10:07:47 +0900 (JST), Kyotaro Horiguchi <horikyota.ntt@gmail.com> wrote in
>> (I found a bug in a benchmark-aid function
>> (CatalogCacheFlushCatalog2), I repost an updated version soon.)
>
> I noticed that a catcachebench-aid function
> CatalogCacheFlushCatalog2() allocates the bucket array wrongly in the
> current memory context, which leads to a crash.
>
> This is a fixed and rebased version.
Thanks, with the scripts you provided, I was able to run the performance
tests on my laptop, and got very similar results as you did.
The impact of v7-0002-Remove-dead-flag-from-catcache-tuple.patch is very
small. I think I could see it in the tests, but only barely. And the tests
did nothing else than do syscache lookups; in any real world scenario, it
would be lost in noise. I think we can put that aside for now, and focus on
v6-0001-CatCache-expiration-feature.patch:
The pruning is still pretty lethargic:
- Entries created in the same transactions are never pruned away
- The size of the hash table is never shrunk. So even though the patch
  puts a backstop to the hash table growing indefinitely, if you run one
  transaction that bloats the cache, it's bloated for the rest of the
  session.
I think that's OK. We might want to be more aggressive in the future, but
for now it seems reasonable to lean towards the current behavior where
nothing is pruned. Although I wonder if we should try to set 'catcacheclock'
more aggressively. I think we could set it whenever GetCurrentTimestamp() is
called, for example.
Given how unaggressive this mechanism is, I think it should be safe to
enable it by default. What would be a suitable default for
catalog_cache_prune_min_age? 30 seconds?
Documentation needs to be updated for the new GUC.
Attached is a version with a few little cleanups:
- use TimestampTz instead of uint64 for the timestamps
- remove assign_catalog_cache_prune_min_age(). All it did was convert the
  GUC's value from seconds to microseconds, and stored it in a separate
  variable. Multiplication is cheap, so we can just do it when we use the
  GUC's value instead.
- Heikki
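For concreteness, the idea of advancing the clock whenever
GetCurrentTimestamp() is called could look roughly like the sketch below.
The function body mirrors the existing implementation in
src/backend/utils/adt/timestamp.c; only the SetCatCacheClock() call is the
hypothetical addition, and it is not part of any posted patch.

	TimestampTz
	GetCurrentTimestamp(void)
	{
		TimestampTz result;
		struct timeval tp;

		gettimeofday(&tp, NULL);

		result = (TimestampTz) tp.tv_sec -
			((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY);
		result = (result * USECS_PER_SEC) + tp.tv_usec;

		/* hypothetical addition: keep the catcache pruning clock fresh */
		SetCatCacheClock(result);

		return result;
	}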
At Wed, 27 Jan 2021 13:11:55 +0200, Heikki Linnakangas <hlinnaka@iki.fi> wrote in
> On 27/01/2021 03:13, Kyotaro Horiguchi wrote:
> > At Thu, 14 Jan 2021 17:32:27 +0900 (JST), Kyotaro Horiguchi
> > <horikyota.ntt@gmail.com> wrote in
> >> The commit 4656e3d668 (debug_invalidate_system_caches_always)
> >> conflicted with this patch. Rebased.
> > At Wed, 27 Jan 2021 10:07:47 +0900 (JST), Kyotaro Horiguchi
> > <horikyota.ntt@gmail.com> wrote in
> >> (I found a bug in a benchmark-aid function
> >> (CatalogCacheFlushCatalog2), I repost an updated version soon.)
> > I noticed that a catcachebench-aid function
> CatalogCacheFlushCatalog2() allocates the bucket array wrongly in the
> > current memory context, which leads to a crash.
> This is a fixed and rebased version.
>
> Thanks, with the scripts you provided, I was able to run the
> performance tests on my laptop, and got very similar results as you
> did.
>
> The impact of v7-0002-Remove-dead-flag-from-catcache-tuple.patch is
> very small. I think I could see it in the tests, but only barely. And
> the tests did nothing else than do syscache lookups; in any real world
> scenario, it would be lost in noise. I think we can put that aside for
> now, and focus on v6-0001-CatCache-expiration-feature.patch:
I agree with that opinion. But it is a bit disappointing that the long
struggle ended up in vain :p
> The pruning is still pretty lethargic:
>
> - Entries created in the same transactions are never pruned away
>
> - The size of the hash table is never shrunk. So even though the patch
>   puts a backstop to the hash table growing indefinitely, if you run one
>   transaction that bloats the cache, it's bloated for the rest of the
>   session.
Right. But more frequent checks impact performance. We can do more
aggressive pruning in idle time.
> I think that's OK. We might want to be more aggressive in the future,
> but for now it seems reasonable to lean towards the current behavior
> where nothing is pruned. Although I wonder if we should try to set
> 'catcacheclock' more aggressively. I think we could set it whenever
> GetCurrentTimestamp() is called, for example.
Ah. I hadn't thought in that direction. global_last_acquired_timestamp or
some such?
> Given how unaggressive this mechanism is, I think it should be safe to
> enable it by default. What would be a suitable default for
> catalog_cache_prune_min_age? 30 seconds?
Without having thought about it in detail, that seems a bit too short. The
values suggested for this variable so far are 300-600s. That is,
intermittent queries issued at roughly 5-10 minute intervals don't lose
their cache entries.
In a bad case, two queries alternately remove each other's cache
entries.
Q1: adds 100 entries
<1 minute passed>
Q2: adds 100 entries but rehash is going to happen at 150 entries and
the existing 100 entries added by Q1 are removed.
<1 minute passed>
Q1: adds 100 entries but rehash is going to happen at 150 entries and
the existing 100 entries added by Q2 are removed.
<repeats>
Or a transaction sequence that persists longer than that many seconds may
lose some of its catcache entries.
> Documentation needs to be updated for the new GUC.
>
> Attached is a version with a few little cleanups:
> - use TimestampTz instead of uint64 for the timestamps
> - remove assign_catalog_cache_prune_min_age(). All it did was convert
>   the GUC's value from seconds to microseconds, and stored it in a
>   separate variable. Multiplication is cheap, so we can just do it when
>   we use the GUC's value instead.
Yeah, the latter is a leftover from the struggle to cut down CPU
cycles in the normal paths. I don't object to doing so.
I found that some comments were apparently stale. cp->cc_oldest_ts was
not used anywhere, even though it was added for deciding whether to scan
or not.
I fixed the following points in the attached.
- Removed some comments that are obvious ("Timestamp in us").
- Added a cp->cc_oldest_ts check in CatCacheCleanupOldEntries.
- Set the default value for catalog_cache_prune_min_age to 600s.
- Added a doc entry for the new GUC in the resource/memory section.
- Fixed some code comments.
- Adjusted the pruning criterion from (ct->lastaccess < prune_threshold) to <=.
I was going to write in the doc something like "you can inspect memory
consumption by catalog caches using pg_backend_memory_contexts", but
all the memory used by the catalog caches is in CacheMemoryContext. Would
it make sense for each catalog cache to have its own context?
regards.
--
Kyotaro Horiguchi
NTT Open Source Software Center
From 01d32ffa499ee9185e222fe6fa3d39ad6ac5ff37 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date: Wed, 27 Jan 2021 13:08:08 +0200
Subject: [PATCH v9] CatCache expiration feature
Author: Kyotaro Horiguchi
---
doc/src/sgml/config.sgml | 20 +++++++
src/backend/access/transam/xact.c | 3 ++
src/backend/utils/cache/catcache.c | 85 +++++++++++++++++++++++++++++-
src/backend/utils/misc/guc.c | 12 +++++
src/include/utils/catcache.h | 17 ++++++
5 files changed, 136 insertions(+), 1 deletion(-)
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index f1037df5a9..14be8061ce 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -1960,6 +1960,26 @@ include_dir 'conf.d'
</listitem>
</varlistentry>
+ <varlistentry id="guc-catalog-cache-prune-min-age" xreflabel="catalog_cache_prune_min_age">
+ <term><varname>catalog_cache_prune_min_age</varname> (<type>integer</type>)
+ <indexterm>
+ <primary><varname>catalog_cache_prune_min_age</varname> configuration parameter</primary>
+ </indexterm>
+ </term>
+ <listitem>
+ <para>
+ Setting <varname>catalog_cache_prune_min_age</varname> allows catalog
+ cache entries that have been unused for longer than this many seconds
+ to be removed. A value of <literal>-1</literal> disables this feature,
+ effectively setting the value to infinity. The default is 600 seconds.
+ You can reduce this value to reduce the amount of memory used by the
+ catalog cache at the cost of possible performance degradation, or
+ increase it to improve the performance of intermittently executed
+ queries at the cost of possibly higher memory usage.
+ </para>
+ </listitem>
+ </varlistentry>
+
</variablelist>
</sect2>
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index a2068e3fd4..86888d2409 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1086,6 +1086,9 @@ static void
AtStart_Cache(void)
{
AcceptInvalidationMessages();
+
+ if (xactStartTimestamp != 0)
+ SetCatCacheClock(xactStartTimestamp);
}
/*
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index fa2b49c676..3e24a81992 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -38,6 +38,7 @@
#include "utils/rel.h"
#include "utils/resowner_private.h"
#include "utils/syscache.h"
+#include "utils/timestamp.h"
/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
@@ -60,9 +61,18 @@
#define CACHE_elog(...)
#endif
+/*
+ * GUC variable to define the minimum age of entries that are the candidates
+ * for eviction in seconds. -1 to disable the feature.
+ */
+int catalog_cache_prune_min_age = -1;
+
/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;
+/* Clock for the last accessed time of catcache entry. */
+TimestampTz catcacheclock = 0;
+
static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
int nkeys,
Datum v1, Datum v2,
@@ -74,6 +84,7 @@ static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
Index hashIndex,
Datum v1, Datum v2,
Datum v3, Datum v4);
+static bool CatCacheCleanupOldEntries(CatCache *cp);
static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
Datum v1, Datum v2, Datum v3, Datum v4);
@@ -833,6 +844,7 @@ InitCatCache(int id,
cp->cc_nkeys = nkeys;
for (i = 0; i < nkeys; ++i)
cp->cc_keyno[i] = key[i];
+ cp->cc_oldest_ts = catcacheclock;
/*
* new cache is initialized as far as we can go for now. print some
@@ -1264,6 +1276,9 @@ SearchCatCacheInternal(CatCache *cache,
*/
dlist_move_head(bucket, &ct->cache_elem);
+ /* Record the last access timestamp */
+ ct->lastaccess = catcacheclock;
+
/*
* If it's a positive entry, bump its refcount and return it. If it's
* negative, we can report failure to the caller.
@@ -1425,6 +1440,69 @@ SearchCatCacheMiss(CatCache *cache,
return &ct->tuple;
}
+/*
+ * CatCacheCleanupOldEntries - Remove infrequently-used entries
+ *
+ * Removes entries that have been left unused for a certain period to prevent
+ * catcache bloat.
+ */
+static bool
+CatCacheCleanupOldEntries(CatCache *cp)
+{
+ int nremoved = 0;
+ int i;
+ TimestampTz oldest_ts = catcacheclock;
+ TimestampTz prune_threshold;
+
+ if (catalog_cache_prune_min_age < 0)
+ return false;
+
+ /* entries older than this time would be removed */
+ prune_threshold = catcacheclock -
+ ((int64) catalog_cache_prune_min_age) * USECS_PER_SEC;
+
+ /* return if we know we have no entry to remove */
+ if (cp->cc_oldest_ts > prune_threshold)
+ return false;
+
+ /* Scan over the whole hash to find entries to remove */
+ for (i = 0 ; i < cp->cc_nbuckets ; i++)
+ {
+ dlist_mutable_iter iter;
+
+ dlist_foreach_modify(iter, &cp->cc_bucket[i])
+ {
+ CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
+
+ /* Don't remove referenced entries */
+ if (ct->refcount == 0 &&
+ (ct->c_list == NULL || ct->c_list->refcount == 0))
+ {
+ if (ct->lastaccess <= prune_threshold)
+ {
+ CatCacheRemoveCTup(cp, ct);
+ nremoved++;
+
+ /* don't let the removed entry update oldest_ts */
+ continue;
+ }
+ }
+
+ /* update the oldest timestamp if the entry remains alive */
+ if (ct->lastaccess < oldest_ts)
+ oldest_ts = ct->lastaccess;
+ }
+ }
+
+ cp->cc_oldest_ts = oldest_ts;
+
+ if (nremoved > 0)
+ elog(DEBUG1, "pruning catalog cache id=%d for %s: removed %d / %d",
+ cp->id, cp->cc_relname, nremoved, cp->cc_ntup + nremoved);
+
+ return nremoved > 0;
+}
+
/*
* ReleaseCatCache
*
@@ -1888,6 +1966,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
ct->dead = false;
ct->negative = negative;
ct->hash_value = hashValue;
+ ct->lastaccess = catcacheclock;
dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
@@ -1899,7 +1978,11 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
* arbitrarily, we enlarge when fill factor > 2.
*/
if (cache->cc_ntup > cache->cc_nbuckets * 2)
- RehashCatCache(cache);
+ {
+ /* try removing old entries before expanding hash */
+ if (!CatCacheCleanupOldEntries(cache))
+ RehashCatCache(cache);
+ }
return ct;
}
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index eafdb1118e..6a1e52911a 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -88,6 +88,7 @@
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/bytea.h"
+#include "utils/catcache.h"
#include "utils/float.h"
#include "utils/guc_tables.h"
#include "utils/memutils.h"
@@ -3445,6 +3446,17 @@ static struct config_int ConfigureNamesInt[] =
NULL, NULL, NULL
},
+ {
+ {"catalog_cache_prune_min_age", PGC_USERSET, RESOURCES_MEM,
+ gettext_noop("System catalog cache entries that are unused more than this seconds are to be removed."),
+ gettext_noop("The value of -1 turns off pruning."),
+ GUC_UNIT_S
+ },
+ &catalog_cache_prune_min_age,
+ 600, -1, INT_MAX,
+ NULL, NULL, NULL
+ },
+
/* End-of-list marker */
{
{NULL, 0, 0, NULL, NULL}, NULL, 0, 0, 0, NULL, NULL, NULL
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index ddc2762eb3..786df7aeda 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -22,6 +22,7 @@
#include "access/htup.h"
#include "access/skey.h"
+#include "datatype/timestamp.h"
#include "lib/ilist.h"
#include "utils/relcache.h"
@@ -61,6 +62,7 @@ typedef struct catcache
slist_node cc_next; /* list link */
ScanKeyData cc_skey[CATCACHE_MAXKEYS]; /* precomputed key info for heap
* scans */
+ TimestampTz cc_oldest_ts; /* timestamp of the oldest tuple */
/*
* Keep these at the end, so that compiling catcache.c with CATCACHE_STATS
@@ -119,6 +121,7 @@ typedef struct catctup
bool dead; /* dead but not yet removed? */
bool negative; /* negative cache entry? */
HeapTupleData tuple; /* tuple management header */
+ TimestampTz lastaccess; /* timestamp of the last use */
/*
* The tuple may also be a member of at most one CatCList. (If a single
@@ -189,6 +192,20 @@ typedef struct catcacheheader
/* this extern duplicates utils/memutils.h... */
extern PGDLLIMPORT MemoryContext CacheMemoryContext;
+
+/* for guc.c, not PGDLLPMPORT'ed */
+extern int catalog_cache_prune_min_age;
+
+/* source clock for access timestamp of catcache entries */
+extern TimestampTz catcacheclock;
+
+/* SetCatCacheClock - set catcache timestamp source clock */
+static inline void
+SetCatCacheClock(TimestampTz ts)
+{
+ catcacheclock = ts;
+}
+
extern void CreateCacheMemoryContext(void);
extern CatCache *InitCatCache(int id, Oid reloid, Oid indexoid,
--
2.27.0
At Thu, 28 Jan 2021 16:50:44 +0900 (JST), Kyotaro Horiguchi <horikyota.ntt@gmail.com> wrote in
> I was going to write in the doc something like "you can inspect memory
> consumption by catalog caches using pg_backend_memory_contexts", but
> all the memory used by catalog cache is in CacheMemoryContext. Is it
> sensible for each catalog cache to have their own contexts?
Something like this.
regards.
--
Kyotaro Horiguchi
NTT Open Source Software Center
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index fa2b49c676..cfbb335bb3 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -769,6 +769,8 @@ InitCatCache(int id,
{
CatCache *cp;
MemoryContext oldcxt;
+ MemoryContext mycxt;
+ char name[32];
size_t sz;
int i;
@@ -792,7 +794,12 @@ InitCatCache(int id,
if (!CacheMemoryContext)
CreateCacheMemoryContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ mycxt = AllocSetContextCreate(CacheMemoryContext, "catcache",
+ ALLOCSET_DEFAULT_SIZES);
+
+ snprintf(name, sizeof(name), "catcache id %d", id);
+ oldcxt = MemoryContextSwitchTo(mycxt);
+ MemoryContextSetIdentifier(mycxt, (const char *)pstrdup(name));
/*
* if first time through, initialize the cache group header
@@ -833,6 +840,7 @@ InitCatCache(int id,
cp->cc_nkeys = nkeys;
for (i = 0; i < nkeys; ++i)
cp->cc_keyno[i] = key[i];
+ cp->cc_mcxt = mycxt;
/*
* new cache is initialized as far as we can go for now. print some
@@ -932,12 +940,12 @@ CatalogCacheInitializeCache(CatCache *cache)
relation = table_open(cache->cc_reloid, AccessShareLock);
/*
- * switch to the cache context so our allocations do not vanish at the end
- * of a transaction
+ * switch to our own context under the cache context so our allocations do
+ * not vanish at the end of a transaction
*/
Assert(CacheMemoryContext != NULL);
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(cache->cc_mcxt);
/*
* copy the relcache's tuple descriptor to permanent cache storage
@@ -998,7 +1006,7 @@ CatalogCacheInitializeCache(CatCache *cache)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
- CacheMemoryContext);
+ cache->cc_mcxt);
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
@@ -1697,7 +1705,7 @@ SearchCatCacheList(CatCache *cache,
table_close(relation, AccessShareLock);
/* Now we can build the CatCList entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(cache->cc_mcxt);
nmembers = list_length(ctlist);
cl = (CatCList *)
palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
@@ -1830,7 +1838,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
dtp = ntp;
/* Allocate memory for CatCTup and the cached tuple in one go */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(cache->cc_mcxt);
ct = (CatCTup *) palloc(sizeof(CatCTup) +
MAXIMUM_ALIGNOF + dtp->t_len);
@@ -1865,7 +1873,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
else
{
Assert(negative);
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(cache->cc_mcxt);
ct = (CatCTup *) palloc(sizeof(CatCTup));
/*
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index ddc2762eb3..a32fea2f11 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -61,6 +61,7 @@ typedef struct catcache
slist_node cc_next; /* list link */
ScanKeyData cc_skey[CATCACHE_MAXKEYS]; /* precomputed key info for heap
* scans */
+ MemoryContext cc_mcxt; /* memory context for this cache */
/*
* Keep these at the end, so that compiling catcache.c with CATCACHE_STATS
On Thu, Jan 28, 2021 at 05:16:52PM +0900, Kyotaro Horiguchi wrote:
> At Thu, 28 Jan 2021 16:50:44 +0900 (JST), Kyotaro Horiguchi <horikyota.ntt@gmail.com> wrote in
> > I was going to write in the doc something like "you can inspect memory
> > consumption by catalog caches using pg_backend_memory_contexts", but
> > all the memory used by catalog cache is in CacheMemoryContext. Is it
> > sensible for each catalog cache to have their own contexts?
>
> Something like this.
Is this feature not going to make it into PG 14? It first appeared in
the January, 2017 commitfest:
https://commitfest.postgresql.org/32/931/
--
Bruce Momjian <bruce@momjian.us> https://momjian.us
EDB https://enterprisedb.com
If only the physical world exists, free will is an illusion.
At Mon, 22 Mar 2021 13:12:10 -0400, Bruce Momjian <bruce@momjian.us> wrote in
> On Thu, Jan 28, 2021 at 05:16:52PM +0900, Kyotaro Horiguchi wrote:
> > At Thu, 28 Jan 2021 16:50:44 +0900 (JST), Kyotaro Horiguchi <horikyota.ntt@gmail.com> wrote in
> > > I was going to write in the doc something like "you can inspect memory
> > > consumption by catalog caches using pg_backend_memory_contexts", but
> > > all the memory used by catalog cache is in CacheMemoryContext. Is it
> > > sensible for each catalog cache to have their own contexts?
> >
> > Something like this.
>
> Is this feature not going to make it into PG 14? It first appeared in
> the January, 2017 commitfest:
>
> https://commitfest.postgresql.org/32/931/
Thank you for looking at this. However, I'm afraid you are looking at a
patch which is not part of the CF entry "Protect syscache <blah>". I'm
happy if it is committed. It is intended not only to show more meaningful
information in pg_get_backend_memory_contexts(), but also to make it easy
to investigate what kind of cache is bloating, and the like.
With the patch, the function shows individual context information lines
for catcaches.
> postgres=# select pg_get_backend_memory_contexts();
...
> (catcache,"catcache id 78",CacheMemoryContext,2,8192,1,6152,0,2040)
> (catcache,"catcache id 77",CacheMemoryContext,2,8192,1,6152,0,2040)
> (catcache,"catcache id 76",CacheMemoryContext,2,16384,2,7592,3,8792)
Applying catcachecxt_by_name.patch.txt on top of it changes the output as
follows. The names are not familiar to users, but give far clearer
information.
> (catcache,USERMAPPINGUSERSERVER,CacheMemoryContext,2,8192,1,6192,0,2000)
> (catcache,USERMAPPINGOID,CacheMemoryContext,2,8192,1,6192,0,2000)
> (catcache,TYPEOID,CacheMemoryContext,2,16384,2,7632,0,8752)
Applying catcachecxt_by_name_id.patch.txt on top of the _by_name patch, the
output changes further, as follows.
> (catcache,USERMAPPINGUSERSERVER[78],CacheMemoryContext,2,8192,1,6136,0,2056)
> (catcache,USERMAPPINGOID[77],CacheMemoryContext,2,8192,1,6136,0,2056)
> (catcache,TYPEOID[76],CacheMemoryContext,2,16384,2,7592,3,8792)
The number enclosed in brackets is the cache id. It is useless for users
but convenient for debugging :p
catcache_individual_mcxt_2.patch.txt: rebased version of per-catcache context.
catcachecxt_by_name.patch.txt: gives a meaningful name to catcache contexts.
catcachecxt_by_name_id.patch.txt: and adds cache id.
regards.
--
Kyotaro Horiguchi
NTT Open Source Software Center
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 55c9445898..7d318cf7aa 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -769,6 +769,8 @@ InitCatCache(int id,
{
CatCache *cp;
MemoryContext oldcxt;
+ MemoryContext mycxt;
+ char name[32];
size_t sz;
int i;
@@ -792,7 +794,12 @@ InitCatCache(int id,
if (!CacheMemoryContext)
CreateCacheMemoryContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ mycxt = AllocSetContextCreate(CacheMemoryContext, "catcache",
+ ALLOCSET_DEFAULT_SIZES);
+
+ snprintf(name, sizeof(name), "catcache id %d", id);
+ oldcxt = MemoryContextSwitchTo(mycxt);
+ MemoryContextSetIdentifier(mycxt, (const char *)pstrdup(name));
/*
* if first time through, initialize the cache group header
@@ -833,6 +840,7 @@ InitCatCache(int id,
cp->cc_nkeys = nkeys;
for (i = 0; i < nkeys; ++i)
cp->cc_keyno[i] = key[i];
+ cp->cc_mcxt = mycxt;
/*
* new cache is initialized as far as we can go for now.
print some @@ -932,12 +940,12 @@ CatalogCacheInitializeCache(CatCache *cache) relation = table_open(cache->cc_reloid, AccessShareLock); /* - * switch to the cache context so our allocations do not vanish at the end - * of a transaction + * switch to our own context under the cache context so our allocations do + * not vanish at the end of a transaction */ - Assert(CacheMemoryContext != NULL); + Assert(CacheMemoryContext != NULL && cache->cc_mcxt != NULL); - oldcxt = MemoryContextSwitchTo(CacheMemoryContext); + oldcxt = MemoryContextSwitchTo(cache->cc_mcxt); /* * copy the relcache's tuple descriptor to permanent cache storage @@ -998,7 +1006,7 @@ CatalogCacheInitializeCache(CatCache *cache) */ fmgr_info_cxt(eqfunc, &cache->cc_skey[i].sk_func, - CacheMemoryContext); + cache->cc_mcxt); /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */ cache->cc_skey[i].sk_attno = cache->cc_keyno[i]; @@ -1697,7 +1705,7 @@ SearchCatCacheList(CatCache *cache, table_close(relation, AccessShareLock); /* Now we can build the CatCList entry. */ - oldcxt = MemoryContextSwitchTo(CacheMemoryContext); + oldcxt = MemoryContextSwitchTo(cache->cc_mcxt); nmembers = list_length(ctlist); cl = (CatCList *) palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *)); @@ -1830,7 +1838,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments, dtp = ntp; /* Allocate memory for CatCTup and the cached tuple in one go */ - oldcxt = MemoryContextSwitchTo(CacheMemoryContext); + oldcxt = MemoryContextSwitchTo(cache->cc_mcxt); ct = (CatCTup *) palloc(sizeof(CatCTup) + MAXIMUM_ALIGNOF + dtp->t_len); @@ -1865,7 +1873,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments, else { Assert(negative); - oldcxt = MemoryContextSwitchTo(CacheMemoryContext); + oldcxt = MemoryContextSwitchTo(cache->cc_mcxt); ct = (CatCTup *) palloc(sizeof(CatCTup)); /* diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h index ddc2762eb3..a32fea2f11 100644 --- a/src/include/utils/catcache.h +++ b/src/include/utils/catcache.h @@ -61,6 +61,7 @@ typedef struct catcache slist_node cc_next; /* list link */ ScanKeyData cc_skey[CATCACHE_MAXKEYS]; /* precomputed key info for heap * scans */ + MemoryContext cc_mcxt; /* memory context for this cache */ /* * Keep these at the end, so that compiling catcache.c with CATCACHE_STATS diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index 7d318cf7aa..ffc7fac63b 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -763,6 +763,7 @@ CatCache * InitCatCache(int id, Oid reloid, Oid indexoid, + char *idstr, int nkeys, const int *key, int nbuckets) @@ -770,7 +771,6 @@ InitCatCache(int id, CatCache *cp; MemoryContext oldcxt; MemoryContext mycxt; - char name[32]; size_t sz; int i; @@ -797,9 +797,10 @@ InitCatCache(int id, mycxt = AllocSetContextCreate(CacheMemoryContext, "catcache", ALLOCSET_DEFAULT_SIZES); - snprintf(name, sizeof(name), "catcache id %d", id); oldcxt = MemoryContextSwitchTo(mycxt); - MemoryContextSetIdentifier(mycxt, (const char *)pstrdup(name)); + + /* we can use idstr without copying since the caller gave us a const */ + MemoryContextSetIdentifier(mycxt, idstr); /* * if first time through, initialize the cache group header diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c index e4dc4ee34e..0fdcb71911 100644 --- a/src/backend/utils/cache/syscache.c +++ b/src/backend/utils/cache/syscache.c @@ -115,14 +115,16 @@ struct cachedesc { 
 	Oid			reloid;			/* OID of the relation being cached */
 	Oid			indoid;			/* OID of index relation for this cache */
+	char	   *cacheid;		/* identifier string */
 	int			nkeys;			/* # of keys needed for cache lookup */
 	int			key[4];			/* attribute numbers of key attrs */
 	int			nbuckets;		/* number of hash buckets for this cache */
 };
 
 static const struct cachedesc cacheinfo[] = {
-	{AggregateRelationId,		/* AGGFNOID */
+	{AggregateRelationId,
 		AggregateFnoidIndexId,
+		"AGGFNOID",
 		1,
 		{
 			Anum_pg_aggregate_aggfnoid,
@@ -132,8 +134,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		16
 	},
-	{AccessMethodRelationId,		/* AMNAME */
+	{AccessMethodRelationId,
 		AmNameIndexId,
+		"AMNAME",
 		1,
 		{
 			Anum_pg_am_amname,
@@ -143,8 +146,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{AccessMethodRelationId,		/* AMOID */
+	{AccessMethodRelationId,
 		AmOidIndexId,
+		"AMOID",
 		1,
 		{
 			Anum_pg_am_oid,
@@ -154,8 +158,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{AccessMethodOperatorRelationId,		/* AMOPOPID */
+	{AccessMethodOperatorRelationId,
 		AccessMethodOperatorIndexId,
+		"AMOPOPID",
 		3,
 		{
 			Anum_pg_amop_amopopr,
@@ -165,8 +170,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		64
 	},
-	{AccessMethodOperatorRelationId,		/* AMOPSTRATEGY */
+	{AccessMethodOperatorRelationId,
 		AccessMethodStrategyIndexId,
+		"AMOPSTRATEGY",
 		4,
 		{
 			Anum_pg_amop_amopfamily,
@@ -176,8 +182,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		64
 	},
-	{AccessMethodProcedureRelationId,		/* AMPROCNUM */
+	{AccessMethodProcedureRelationId,
 		AccessMethodProcedureIndexId,
+		"AMPROCNUM",
 		4,
 		{
 			Anum_pg_amproc_amprocfamily,
@@ -187,8 +194,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		16
 	},
-	{AttributeRelationId,		/* ATTNAME */
+	{AttributeRelationId,
 		AttributeRelidNameIndexId,
+		"ATTNAME",
 		2,
 		{
 			Anum_pg_attribute_attrelid,
@@ -198,8 +206,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		32
 	},
-	{AttributeRelationId,		/* ATTNUM */
+	{AttributeRelationId,
 		AttributeRelidNumIndexId,
+		"ATTNUM",
 		2,
 		{
 			Anum_pg_attribute_attrelid,
@@ -209,8 +218,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		128
 	},
-	{AuthMemRelationId,		/* AUTHMEMMEMROLE */
+	{AuthMemRelationId,
 		AuthMemMemRoleIndexId,
+		"AUTHMEMMEMROLE",
 		2,
 		{
 			Anum_pg_auth_members_member,
@@ -220,8 +230,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{AuthMemRelationId,		/* AUTHMEMROLEMEM */
+	{AuthMemRelationId,
 		AuthMemRoleMemIndexId,
+		"AUTHMEMROLEMEM",
 		2,
 		{
 			Anum_pg_auth_members_roleid,
@@ -231,8 +242,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{AuthIdRelationId,		/* AUTHNAME */
+	{AuthIdRelationId,
 		AuthIdRolnameIndexId,
+		"AUTHNAME",
 		1,
 		{
 			Anum_pg_authid_rolname,
@@ -242,8 +254,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{AuthIdRelationId,		/* AUTHOID */
+	{AuthIdRelationId,
 		AuthIdOidIndexId,
+		"AUTHOID",
 		1,
 		{
 			Anum_pg_authid_oid,
@@ -254,8 +267,9 @@ static const struct cachedesc cacheinfo[] = {
 		8
 	},
 	{
-		CastRelationId,		/* CASTSOURCETARGET */
+		CastRelationId,
 		CastSourceTargetIndexId,
+		"CASTSOURCETARGET",
 		2,
 		{
 			Anum_pg_cast_castsource,
@@ -265,8 +279,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		256
 	},
-	{OperatorClassRelationId,		/* CLAAMNAMENSP */
+	{OperatorClassRelationId,
 		OpclassAmNameNspIndexId,
+		"CLAAMNAMENSP",
 		3,
 		{
 			Anum_pg_opclass_opcmethod,
@@ -276,8 +291,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{OperatorClassRelationId,		/* CLAOID */
+	{OperatorClassRelationId,
 		OpclassOidIndexId,
+		"CLAOID",
 		1,
 		{
 			Anum_pg_opclass_oid,
@@ -287,8 +303,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{CollationRelationId,		/* COLLNAMEENCNSP */
+	{CollationRelationId,
 		CollationNameEncNspIndexId,
+		"COLLNAMEENCNSP",
 		3,
 		{
 			Anum_pg_collation_collname,
@@ -298,8 +315,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{CollationRelationId,		/* COLLOID */
+	{CollationRelationId,
 		CollationOidIndexId,
+		"COLLOID",
 		1,
 		{
 			Anum_pg_collation_oid,
@@ -309,8 +327,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{ConversionRelationId,		/* CONDEFAULT */
+	{ConversionRelationId,
 		ConversionDefaultIndexId,
+		"CONDEFAULT",
 		4,
 		{
 			Anum_pg_conversion_connamespace,
@@ -320,8 +339,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{ConversionRelationId,		/* CONNAMENSP */
+	{ConversionRelationId,
 		ConversionNameNspIndexId,
+		"CONNAMENSP",
 		2,
 		{
 			Anum_pg_conversion_conname,
@@ -331,8 +351,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{ConstraintRelationId,		/* CONSTROID */
+	{ConstraintRelationId,
 		ConstraintOidIndexId,
+		"CONSTROID",
 		1,
 		{
 			Anum_pg_constraint_oid,
@@ -342,8 +363,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		16
 	},
-	{ConversionRelationId,		/* CONVOID */
+	{ConversionRelationId,
 		ConversionOidIndexId,
+		"CONVOID",
 		1,
 		{
 			Anum_pg_conversion_oid,
@@ -353,8 +375,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{DatabaseRelationId,		/* DATABASEOID */
+	{DatabaseRelationId,
 		DatabaseOidIndexId,
+		"DATABASEOID",
 		1,
 		{
 			Anum_pg_database_oid,
@@ -364,8 +387,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{DefaultAclRelationId,		/* DEFACLROLENSPOBJ */
+	{DefaultAclRelationId,
 		DefaultAclRoleNspObjIndexId,
+		"DEFACLROLENSPOBJ",
 		3,
 		{
 			Anum_pg_default_acl_defaclrole,
@@ -375,8 +399,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{EnumRelationId,		/* ENUMOID */
+	{EnumRelationId,
 		EnumOidIndexId,
+		"ENUMOID",
 		1,
 		{
 			Anum_pg_enum_oid,
@@ -386,8 +411,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{EnumRelationId,		/* ENUMTYPOIDNAME */
+	{EnumRelationId,
 		EnumTypIdLabelIndexId,
+		"ENUMTYPOIDNAME",
 		2,
 		{
 			Anum_pg_enum_enumtypid,
@@ -397,8 +423,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{EventTriggerRelationId,		/* EVENTTRIGGERNAME */
+	{EventTriggerRelationId,
 		EventTriggerNameIndexId,
+		"EVENTTRIGGERNAME",
 		1,
 		{
 			Anum_pg_event_trigger_evtname,
@@ -408,8 +435,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{EventTriggerRelationId,		/* EVENTTRIGGEROID */
+	{EventTriggerRelationId,
 		EventTriggerOidIndexId,
+		"EVENTTRIGGEROID",
 		1,
 		{
 			Anum_pg_event_trigger_oid,
@@ -419,8 +447,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{ForeignDataWrapperRelationId,		/* FOREIGNDATAWRAPPERNAME */
+	{ForeignDataWrapperRelationId,
 		ForeignDataWrapperNameIndexId,
+		"FOREIGNDATAWRAPPERNAME",
 		1,
 		{
 			Anum_pg_foreign_data_wrapper_fdwname,
@@ -430,8 +459,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{ForeignDataWrapperRelationId,		/* FOREIGNDATAWRAPPEROID */
+	{ForeignDataWrapperRelationId,
 		ForeignDataWrapperOidIndexId,
+		"FOREIGNDATAWRAPPEROID",
 		1,
 		{
 			Anum_pg_foreign_data_wrapper_oid,
@@ -441,8 +471,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{ForeignServerRelationId,		/* FOREIGNSERVERNAME */
+	{ForeignServerRelationId,
 		ForeignServerNameIndexId,
+		"FOREIGNSERVERNAME",
 		1,
 		{
 			Anum_pg_foreign_server_srvname,
@@ -452,8 +483,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{ForeignServerRelationId,		/* FOREIGNSERVEROID */
+	{ForeignServerRelationId,
 		ForeignServerOidIndexId,
+		"FOREIGNSERVEROID",
 		1,
 		{
 			Anum_pg_foreign_server_oid,
@@ -463,8 +495,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{ForeignTableRelationId,		/* FOREIGNTABLEREL */
+	{ForeignTableRelationId,
 		ForeignTableRelidIndexId,
+		"FOREIGNTABLEREL",
 		1,
 		{
 			Anum_pg_foreign_table_ftrelid,
@@ -474,8 +507,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{IndexRelationId,		/* INDEXRELID */
+	{IndexRelationId,
 		IndexRelidIndexId,
+		"INDEXRELID",
 		1,
 		{
 			Anum_pg_index_indexrelid,
@@ -485,8 +519,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		64
 	},
-	{LanguageRelationId,		/* LANGNAME */
+	{LanguageRelationId,
 		LanguageNameIndexId,
+		"LANGNAME",
 		1,
 		{
 			Anum_pg_language_lanname,
@@ -496,8 +531,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{LanguageRelationId,		/* LANGOID */
+	{LanguageRelationId,
 		LanguageOidIndexId,
+		"LANGOID",
 		1,
 		{
 			Anum_pg_language_oid,
@@ -507,8 +543,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{NamespaceRelationId,		/* NAMESPACENAME */
+	{NamespaceRelationId,
 		NamespaceNameIndexId,
+		"NAMESPACENAME",
 		1,
 		{
 			Anum_pg_namespace_nspname,
@@ -518,8 +555,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{NamespaceRelationId,		/* NAMESPACEOID */
+	{NamespaceRelationId,
 		NamespaceOidIndexId,
+		"NAMESPACEOID",
 		1,
 		{
 			Anum_pg_namespace_oid,
@@ -529,8 +567,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		16
 	},
-	{OperatorRelationId,		/* OPERNAMENSP */
+	{OperatorRelationId,
 		OperatorNameNspIndexId,
+		"OPERNAMENSP",
 		4,
 		{
 			Anum_pg_operator_oprname,
@@ -540,8 +579,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		256
 	},
-	{OperatorRelationId,		/* OPEROID */
+	{OperatorRelationId,
 		OperatorOidIndexId,
+		"OPEROID",
 		1,
 		{
 			Anum_pg_operator_oid,
@@ -551,8 +591,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		32
 	},
-	{OperatorFamilyRelationId,		/* OPFAMILYAMNAMENSP */
+	{OperatorFamilyRelationId,
 		OpfamilyAmNameNspIndexId,
+		"OPFAMILYAMNAMENSP",
 		3,
 		{
 			Anum_pg_opfamily_opfmethod,
@@ -562,8 +603,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{OperatorFamilyRelationId,		/* OPFAMILYOID */
+	{OperatorFamilyRelationId,
 		OpfamilyOidIndexId,
+		"OPFAMILYOID",
 		1,
 		{
 			Anum_pg_opfamily_oid,
@@ -573,8 +615,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{PartitionedRelationId,		/* PARTRELID */
+	{PartitionedRelationId,
 		PartitionedRelidIndexId,
+		"PARTRELID",
 		1,
 		{
 			Anum_pg_partitioned_table_partrelid,
@@ -584,8 +627,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		32
 	},
-	{ProcedureRelationId,		/* PROCNAMEARGSNSP */
+	{ProcedureRelationId,
 		ProcedureNameArgsNspIndexId,
+		"PROCNAMEARGSNSP",
 		3,
 		{
 			Anum_pg_proc_proname,
@@ -595,8 +639,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		128
 	},
-	{ProcedureRelationId,		/* PROCOID */
+	{ProcedureRelationId,
 		ProcedureOidIndexId,
+		"PROCOID",
 		1,
 		{
 			Anum_pg_proc_oid,
@@ -606,8 +651,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		128
 	},
-	{PublicationRelationId,		/* PUBLICATIONNAME */
+	{PublicationRelationId,
 		PublicationNameIndexId,
+		"PUBLICATIONNAME",
 		1,
 		{
 			Anum_pg_publication_pubname,
@@ -617,8 +663,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{PublicationRelationId,		/* PUBLICATIONOID */
+	{PublicationRelationId,
 		PublicationObjectIndexId,
+		"PUBLICATIONOID",
 		1,
 		{
 			Anum_pg_publication_oid,
@@ -628,8 +675,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{PublicationRelRelationId,		/* PUBLICATIONREL */
+	{PublicationRelRelationId,
 		PublicationRelObjectIndexId,
+		"PUBLICATIONREL",
 		1,
 		{
 			Anum_pg_publication_rel_oid,
@@ -639,8 +687,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		64
 	},
-	{PublicationRelRelationId,		/* PUBLICATIONRELMAP */
+	{PublicationRelRelationId,
 		PublicationRelPrrelidPrpubidIndexId,
+		"PUBLICATIONRELMAP",
 		2,
 		{
 			Anum_pg_publication_rel_prrelid,
@@ -650,8 +699,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		64
 	},
-	{RangeRelationId,		/* RANGEMULTIRANGE */
+	{RangeRelationId,
 		RangeMultirangeTypidIndexId,
+		"RANGEMULTIRANGE",
 		1,
 		{
 			Anum_pg_range_rngmultitypid,
@@ -662,8 +712,9 @@ static const struct cachedesc cacheinfo[] = {
 		4
 	},
 
-	{RangeRelationId,		/* RANGETYPE */
+	{RangeRelationId,
 		RangeTypidIndexId,
+		"RANGETYPE",
 		1,
 		{
 			Anum_pg_range_rngtypid,
@@ -673,8 +724,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{RelationRelationId,		/* RELNAMENSP */
+	{RelationRelationId,
 		ClassNameNspIndexId,
+		"RELNAMENSP",
 		2,
 		{
 			Anum_pg_class_relname,
@@ -684,8 +736,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		128
 	},
-	{RelationRelationId,		/* RELOID */
+	{RelationRelationId,
 		ClassOidIndexId,
+		"RELOID",
 		1,
 		{
 			Anum_pg_class_oid,
@@ -695,8 +748,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		128
 	},
-	{ReplicationOriginRelationId,		/* REPLORIGIDENT */
+	{ReplicationOriginRelationId,
 		ReplicationOriginIdentIndex,
+		"REPLORIGIDENT",
 		1,
 		{
 			Anum_pg_replication_origin_roident,
@@ -706,8 +760,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		16
 	},
-	{ReplicationOriginRelationId,		/* REPLORIGNAME */
+	{ReplicationOriginRelationId,
 		ReplicationOriginNameIndex,
+		"REPLORIGNAME",
 		1,
 		{
 			Anum_pg_replication_origin_roname,
@@ -717,8 +772,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		16
 	},
-	{RewriteRelationId,		/* RULERELNAME */
+	{RewriteRelationId,
 		RewriteRelRulenameIndexId,
+		"RULERELNAME",
 		2,
 		{
 			Anum_pg_rewrite_ev_class,
@@ -728,8 +784,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		8
 	},
-	{SequenceRelationId,		/* SEQRELID */
+	{SequenceRelationId,
 		SequenceRelidIndexId,
+		"SEQRELID",
 		1,
 		{
 			Anum_pg_sequence_seqrelid,
@@ -739,8 +796,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		32
 	},
-	{StatisticExtDataRelationId,		/* STATEXTDATASTXOID */
+	{StatisticExtDataRelationId,
 		StatisticExtDataStxoidIndexId,
+		"STATEXTDATASTXOID",
 		1,
 		{
 			Anum_pg_statistic_ext_data_stxoid,
@@ -750,8 +808,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{StatisticExtRelationId,		/* STATEXTNAMENSP */
+	{StatisticExtRelationId,
 		StatisticExtNameIndexId,
+		"STATEXTNAMENSP",
 		2,
 		{
 			Anum_pg_statistic_ext_stxname,
@@ -761,8 +820,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{StatisticExtRelationId,		/* STATEXTOID */
+	{StatisticExtRelationId,
 		StatisticExtOidIndexId,
+		"STATEXTOID",
 		1,
 		{
 			Anum_pg_statistic_ext_oid,
@@ -772,8 +832,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{StatisticRelationId,		/* STATRELATTINH */
+	{StatisticRelationId,
 		StatisticRelidAttnumInhIndexId,
+		"STATRELATTINH",
 		3,
 		{
 			Anum_pg_statistic_starelid,
@@ -783,8 +844,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		128
 	},
-	{SubscriptionRelationId,		/* SUBSCRIPTIONNAME */
+	{SubscriptionRelationId,
 		SubscriptionNameIndexId,
+		"SUBSCRIPTIONNAME",
 		2,
 		{
 			Anum_pg_subscription_subdbid,
@@ -794,8 +856,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{SubscriptionRelationId,		/* SUBSCRIPTIONOID */
+	{SubscriptionRelationId,
 		SubscriptionObjectIndexId,
+		"SUBSCRIPTIONOID",
 		1,
 		{
 			Anum_pg_subscription_oid,
@@ -805,8 +868,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{SubscriptionRelRelationId,		/* SUBSCRIPTIONRELMAP */
+	{SubscriptionRelRelationId,
 		SubscriptionRelSrrelidSrsubidIndexId,
+		"SUBSCRIPTIONRELMAP",
 		2,
 		{
 			Anum_pg_subscription_rel_srrelid,
@@ -816,8 +880,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		64
 	},
-	{TableSpaceRelationId,		/* TABLESPACEOID */
+	{TableSpaceRelationId,
 		TablespaceOidIndexId,
+		"TABLESPACEOID",
 		1,
 		{
 			Anum_pg_tablespace_oid,
@@ -827,8 +892,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		4
 	},
-	{TransformRelationId,		/* TRFOID */
+	{TransformRelationId,
 		TransformOidIndexId,
+		"TRFOID",
 		1,
 		{
 			Anum_pg_transform_oid,
@@ -838,8 +904,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		16
 	},
-	{TransformRelationId,		/* TRFTYPELANG */
+	{TransformRelationId,
 		TransformTypeLangIndexId,
+		"TRFTYPELANG",
 		2,
 		{
 			Anum_pg_transform_trftype,
@@ -849,8 +916,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		16
 	},
-	{TSConfigMapRelationId,		/* TSCONFIGMAP */
+	{TSConfigMapRelationId,
 		TSConfigMapIndexId,
+		"TSCONFIGMAP",
 		3,
 		{
 			Anum_pg_ts_config_map_mapcfg,
@@ -860,8 +928,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{TSConfigRelationId,		/* TSCONFIGNAMENSP */
+	{TSConfigRelationId,
 		TSConfigNameNspIndexId,
+		"TSCONFIGNAMENSP",
 		2,
 		{
 			Anum_pg_ts_config_cfgname,
@@ -871,8 +940,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{TSConfigRelationId,		/* TSCONFIGOID */
+	{TSConfigRelationId,
 		TSConfigOidIndexId,
+		"TSCONFIGOID",
 		1,
 		{
 			Anum_pg_ts_config_oid,
@@ -882,8 +952,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{TSDictionaryRelationId,		/* TSDICTNAMENSP */
+	{TSDictionaryRelationId,
 		TSDictionaryNameNspIndexId,
+		"TSDICTNAMENSP",
 		2,
 		{
 			Anum_pg_ts_dict_dictname,
@@ -893,8 +964,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{TSDictionaryRelationId,		/* TSDICTOID */
+	{TSDictionaryRelationId,
 		TSDictionaryOidIndexId,
+		"TSDICTOID",
 		1,
 		{
 			Anum_pg_ts_dict_oid,
@@ -904,8 +976,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{TSParserRelationId,		/* TSPARSERNAMENSP */
+	{TSParserRelationId,
 		TSParserNameNspIndexId,
+		"TSPARSERNAMENSP",
 		2,
 		{
 			Anum_pg_ts_parser_prsname,
@@ -915,8 +988,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{TSParserRelationId,		/* TSPARSEROID */
+	{TSParserRelationId,
 		TSParserOidIndexId,
+		"TSPARSEROID",
 		1,
 		{
 			Anum_pg_ts_parser_oid,
@@ -926,8 +1000,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{TSTemplateRelationId,		/* TSTEMPLATENAMENSP */
+	{TSTemplateRelationId,
 		TSTemplateNameNspIndexId,
+		"TSTEMPLATENAMENSP",
 		2,
 		{
 			Anum_pg_ts_template_tmplname,
@@ -937,8 +1012,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{TSTemplateRelationId,		/* TSTEMPLATEOID */
+	{TSTemplateRelationId,
 		TSTemplateOidIndexId,
+		"TSTEMPLATEOID",
 		1,
 		{
 			Anum_pg_ts_template_oid,
@@ -948,8 +1024,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
 	},
-	{TypeRelationId,		/* TYPENAMENSP */
+	{TypeRelationId,
 		TypeNameNspIndexId,
+		"TYPENAMENSP",
 		2,
 		{
 			Anum_pg_type_typname,
@@ -959,8 +1036,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		64
 	},
-	{TypeRelationId,		/* TYPEOID */
+	{TypeRelationId,
 		TypeOidIndexId,
+		"TYPEOID",
 		1,
 		{
 			Anum_pg_type_oid,
@@ -970,8 +1048,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		64
 	},
-	{UserMappingRelationId,		/* USERMAPPINGOID */
+	{UserMappingRelationId,
 		UserMappingOidIndexId,
+		"USERMAPPINGOID",
 		1,
 		{
 			Anum_pg_user_mapping_oid,
@@ -981,8 +1060,9 @@ static const struct cachedesc cacheinfo[] = {
 		},
 		2
	},
-	{UserMappingRelationId,		/* USERMAPPINGUSERSERVER */
+	{UserMappingRelationId,
 		UserMappingUserServerIndexId,
+		"USERMAPPINGUSERSERVER",
 		2,
 		{
 			Anum_pg_user_mapping_umuser,
@@ -1034,6 +1114,7 @@ InitCatalogCache(void)
 		SysCache[cacheId] = InitCatCache(cacheId,
 										 cacheinfo[cacheId].reloid,
 										 cacheinfo[cacheId].indoid,
+										 cacheinfo[cacheId].cacheid,
 										 cacheinfo[cacheId].nkeys,
 										 cacheinfo[cacheId].key,
 										 cacheinfo[cacheId].nbuckets);
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index a32fea2f11..d73cd1909e 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -192,7 +192,7 @@ extern PGDLLIMPORT MemoryContext CacheMemoryContext;
 
 extern void CreateCacheMemoryContext(void);
 
-extern CatCache *InitCatCache(int id, Oid reloid, Oid indexoid,
+extern CatCache *InitCatCache(int id, Oid reloid, Oid indexoid, char* idstr,
 							  int nkeys, const int *key,
 							  int nbuckets);
 extern void InitCatCachePhase2(CatCache *cache, bool touch_index);
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index ffc7fac63b..116db43f7c 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -771,6 +771,7 @@ InitCatCache(int id,
 	CatCache   *cp;
 	MemoryContext oldcxt;
 	MemoryContext mycxt;
+	char		buf[32];
 	size_t		sz;
 	int			i;
 
@@ -799,8 +800,8 @@ InitCatCache(int id,
 
 	oldcxt = MemoryContextSwitchTo(mycxt);
 
-	/* we can use idstr without copying since the caller gave us a const */
-	MemoryContextSetIdentifier(mycxt, idstr);
+	snprintf(buf, 32, "%s[%d]", idstr, id);
+	MemoryContextSetIdentifier(mycxt, (const char *) pstrdup(buf));
 
 	/*
 	 * if first time through, initialize the cache group header