Age Owner TLA Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * catcache.c
4 : * System catalog cache for tuples matching a key.
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/utils/cache/catcache.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include "access/genam.h"
18 : #include "access/heaptoast.h"
19 : #include "access/relscan.h"
20 : #include "access/sysattr.h"
21 : #include "access/table.h"
22 : #include "access/xact.h"
23 : #include "catalog/pg_collation.h"
24 : #include "catalog/pg_operator.h"
25 : #include "catalog/pg_type.h"
26 : #include "common/hashfn.h"
27 : #include "miscadmin.h"
28 : #include "port/pg_bitutils.h"
29 : #ifdef CATCACHE_STATS
30 : #include "storage/ipc.h" /* for on_proc_exit */
31 : #endif
32 : #include "storage/lmgr.h"
33 : #include "utils/builtins.h"
34 : #include "utils/datum.h"
35 : #include "utils/fmgroids.h"
36 : #include "utils/inval.h"
37 : #include "utils/memutils.h"
38 : #include "utils/rel.h"
39 : #include "utils/resowner_private.h"
40 : #include "utils/syscache.h"
41 :
42 :
43 : /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
44 :
45 : /*
46 : * Given a hash value and the size of the hash table, find the bucket
47 : * in which the hash value belongs. Since the hash table must contain
48 : * a power-of-2 number of elements, this is a simple bitmask.
49 : */
50 : #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
51 :
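/*
 * Illustrative note (assumed example, not taken from the original source):
 * because sz is always a power of two, the bitmask above is equivalent to
 * h % sz.  For instance, HASH_INDEX(0x12345678, 64) masks with 0x3F and
 * selects bucket 0x38.
 */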
52 :
53 : /*
54 : * variables, macros and other stuff
55 : */
56 :
57 : #ifdef CACHEDEBUG
58 : #define CACHE_elog(...) elog(__VA_ARGS__)
59 : #else
60 : #define CACHE_elog(...)
61 : #endif
62 :
63 : /* Cache management header --- pointer is NULL until created */
64 : static CatCacheHeader *CacheHdr = NULL;
65 :
66 : static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
67 : int nkeys,
68 : Datum v1, Datum v2,
69 : Datum v3, Datum v4);
70 :
71 : static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
72 : int nkeys,
73 : uint32 hashValue,
74 : Index hashIndex,
75 : Datum v1, Datum v2,
76 : Datum v3, Datum v4);
77 :
78 : static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
79 : Datum v1, Datum v2, Datum v3, Datum v4);
80 : static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
81 : HeapTuple tuple);
82 : static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
83 : const Datum *cachekeys,
84 : const Datum *searchkeys);
85 :
86 : #ifdef CATCACHE_STATS
87 : static void CatCachePrintStats(int code, Datum arg);
88 : #endif
89 : static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
90 : static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
91 : static void CatalogCacheInitializeCache(CatCache *cache);
92 : static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
93 : Datum *arguments,
94 : uint32 hashValue, Index hashIndex,
95 : bool negative);
96 :
97 : static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
98 : Datum *keys);
99 : static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
100 : Datum *srckeys, Datum *dstkeys);
101 :
102 :
103 : /*
104 : * internal support functions
105 : */
106 :
107 : /*
108 : * Hash and equality functions for system types that are used as cache key
109 : * fields. In some cases, we just call the regular SQL-callable functions for
110 : * the appropriate data type, but that tends to be a little slow, and the
111 : * speed of these functions is performance-critical. Therefore, for data
112 : * types that frequently occur as catcache keys, we hard-code the logic here.
113 : * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
114 : * in certain cases (like int4) we can adopt a faster hash algorithm as well.
115 : */
116 :
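/*
 * Illustrative sketch (hypothetical, guarded out so it is not compiled): the
 * generic fmgr-based path that the hard-coded functions below avoid.  The
 * function name is invented for illustration; the point is that each key
 * comparison would otherwise go through DirectFunctionCallN(), which the
 * comment above notes is a substantial overhead on hot catcache paths.
 */
#if 0
static bool
int4eq_via_fmgr(Datum a, Datum b)
{
	/* routes a single int4 comparison through the fmgr call machinery */
	return DatumGetBool(DirectFunctionCall2(int4eq, a, b));
}
#endif
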
2004 andres 117 ECB : static bool
2004 andres 118 GIC 2019770 : chareqfast(Datum a, Datum b)
2004 andres 119 ECB : {
2004 andres 120 GIC 2019770 : return DatumGetChar(a) == DatumGetChar(b);
121 : }
122 :
2004 andres 123 ECB : static uint32
2004 andres 124 GIC 2549118 : charhashfast(Datum datum)
2004 andres 125 ECB : {
2004 andres 126 GIC 2549118 : return murmurhash32((int32) DatumGetChar(datum));
127 : }
128 :
2004 andres 129 ECB : static bool
2004 andres 130 GIC 2766056 : nameeqfast(Datum a, Datum b)
2004 andres 131 ECB : {
2004 andres 132 CBC 2766056 : char *ca = NameStr(*DatumGetName(a));
2004 andres 133 GIC 2766056 : char *cb = NameStr(*DatumGetName(b));
2004 andres 134 ECB :
2004 andres 135 GIC 2766056 : return strncmp(ca, cb, NAMEDATALEN) == 0;
136 : }
137 :
2004 andres 138 ECB : static uint32
2004 andres 139 GIC 7637711 : namehashfast(Datum datum)
2004 andres 140 ECB : {
2004 andres 141 GIC 7637711 : char *key = NameStr(*DatumGetName(datum));
2004 andres 142 ECB :
2004 andres 143 GIC 7637711 : return hash_any((unsigned char *) key, strlen(key));
144 : }
145 :
2004 andres 146 ECB : static bool
2004 andres 147 GIC 3502241 : int2eqfast(Datum a, Datum b)
2004 andres 148 ECB : {
2004 andres 149 GIC 3502241 : return DatumGetInt16(a) == DatumGetInt16(b);
150 : }
151 :
2004 andres 152 ECB : static uint32
2004 andres 153 GIC 5524920 : int2hashfast(Datum datum)
2004 andres 154 ECB : {
2004 andres 155 GIC 5524920 : return murmurhash32((int32) DatumGetInt16(datum));
156 : }
157 :
2004 andres 158 ECB : static bool
2004 andres 159 GIC 56673573 : int4eqfast(Datum a, Datum b)
2004 andres 160 ECB : {
2004 andres 161 GIC 56673573 : return DatumGetInt32(a) == DatumGetInt32(b);
162 : }
163 :
2004 andres 164 ECB : static uint32
2004 andres 165 GIC 70248764 : int4hashfast(Datum datum)
2004 andres 166 ECB : {
2004 andres 167 GIC 70248764 : return murmurhash32((int32) DatumGetInt32(datum));
168 : }
169 :
2004 andres 170 ECB : static bool
2004 andres 171 GIC 85 : texteqfast(Datum a, Datum b)
172 : {
173 : /*
174 : * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
175 : * want to take the fast "deterministic" path in texteq().
1479 peter 176 ECB : */
1479 peter 177 GIC 85 : return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
178 : }
179 :
2004 andres 180 ECB : static uint32
2004 andres 181 GIC 1480 : texthashfast(Datum datum)
182 : {
1479 peter 183 ECB : /* same reasoning as in texteqfast() */
1479 peter 184 GIC 1480 : return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
185 : }
186 :
2004 andres 187 ECB : static bool
2004 andres 188 GIC 7348 : oidvectoreqfast(Datum a, Datum b)
2004 andres 189 ECB : {
2004 andres 190 GIC 7348 : return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
191 : }
192 :
2004 andres 193 ECB : static uint32
2004 andres 194 GIC 440667 : oidvectorhashfast(Datum datum)
2004 andres 195 ECB : {
2004 andres 196 GIC 440667 : return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
197 : }
198 :
199 : /* Lookup support functions for a type. */
7231 tgl 200 ECB : static void
2004 andres 201 GIC 421767 : GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
8448 tgl 202 ECB : {
8448 tgl 203 GIC 421767 : switch (keytype)
8448 tgl 204 ECB : {
7965 tgl 205 CBC 5718 : case BOOLOID:
2004 andres 206 5718 : *hashfunc = charhashfast;
207 5718 : *fasteqfunc = chareqfast;
7231 tgl 208 5718 : *eqfunc = F_BOOLEQ;
209 5718 : break;
7965 210 7710 : case CHAROID:
2004 andres 211 7710 : *hashfunc = charhashfast;
212 7710 : *fasteqfunc = chareqfast;
7231 tgl 213 7710 : *eqfunc = F_CHAREQ;
214 7710 : break;
8448 215 79299 : case NAMEOID:
2004 andres 216 79299 : *hashfunc = namehashfast;
217 79299 : *fasteqfunc = nameeqfast;
7231 tgl 218 79299 : *eqfunc = F_NAMEEQ;
219 79299 : break;
8448 220 24243 : case INT2OID:
2004 andres 221 24243 : *hashfunc = int2hashfast;
222 24243 : *fasteqfunc = int2eqfast;
7231 tgl 223 24243 : *eqfunc = F_INT2EQ;
224 24243 : break;
8448 225 6536 : case INT4OID:
2004 andres 226 6536 : *hashfunc = int4hashfast;
227 6536 : *fasteqfunc = int4eqfast;
7231 tgl 228 6536 : *eqfunc = F_INT4EQ;
229 6536 : break;
8448 230 2888 : case TEXTOID:
2004 andres 231 2888 : *hashfunc = texthashfast;
232 2888 : *fasteqfunc = texteqfast;
7231 tgl 233 2888 : *eqfunc = F_TEXTEQ;
234 2888 : break;
8448 tgl 235 GIC 289998 : case OIDOID:
236 : case REGPROCOID:
237 : case REGPROCEDUREOID:
238 : case REGOPEROID:
239 : case REGOPERATOROID:
240 : case REGCLASSOID:
241 : case REGTYPEOID:
242 : case REGCOLLATIONOID:
243 : case REGCONFIGOID:
244 : case REGDICTIONARYOID:
245 : case REGROLEOID:
2892 andrew 246 ECB : case REGNAMESPACEOID:
2004 andres 247 CBC 289998 : *hashfunc = int4hashfast;
248 289998 : *fasteqfunc = int4eqfast;
7231 tgl 249 289998 : *eqfunc = F_OIDEQ;
250 289998 : break;
8448 251 5375 : case OIDVECTOROID:
2004 andres 252 5375 : *hashfunc = oidvectorhashfast;
253 5375 : *fasteqfunc = oidvectoreqfast;
7231 tgl 254 5375 : *eqfunc = F_OIDVECTOREQ;
7231 tgl 255 GBC 5375 : break;
8448 tgl 256 UBC 0 : default:
7198 tgl 257 UIC 0 : elog(FATAL, "type %u not supported as catcache key", keytype);
258 : *hashfunc = NULL; /* keep compiler quiet */
259 :
260 : *eqfunc = InvalidOid;
261 : break;
8448 tgl 262 ECB : }
8448 tgl 263 GIC 421767 : }
264 :
265 : /*
266 : * CatalogCacheComputeHashValue
267 : *
268 : * Compute the hash value associated with a given set of lookup keys
269 : */
7707 tgl 270 ECB : static uint32
2004 andres 271 GIC 61174845 : CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
272 : Datum v1, Datum v2, Datum v3, Datum v4)
9770 scrappy 273 ECB : {
7707 tgl 274 GIC 61174845 : uint32 hashValue = 0;
5832 tgl 275 ECB : uint32 oneHash;
2004 andres 276 GIC 61174845 : CCHashFN *cc_hashfunc = cache->cc_hashfunc;
277 :
278 : CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
279 : cache->cc_relname, nkeys, cache);
9345 bruce 280 ECB :
7673 tgl 281 GIC 61174845 : switch (nkeys)
9345 bruce 282 ECB : {
9344 bruce 283 CBC 2104322 : case 4:
2004 andres 284 2104322 : oneHash = (cc_hashfunc[3]) (v4);
413 john.naylor 285 GIC 2104322 : hashValue ^= pg_rotate_left32(oneHash, 24);
9344 bruce 286 ECB : /* FALLTHROUGH */
9344 bruce 287 CBC 6722818 : case 3:
2004 andres 288 6722818 : oneHash = (cc_hashfunc[2]) (v3);
413 john.naylor 289 GIC 6722818 : hashValue ^= pg_rotate_left32(oneHash, 16);
9344 bruce 290 ECB : /* FALLTHROUGH */
9344 bruce 291 CBC 16400675 : case 2:
2004 andres 292 16400675 : oneHash = (cc_hashfunc[1]) (v2);
413 john.naylor 293 GIC 16400675 : hashValue ^= pg_rotate_left32(oneHash, 8);
9344 bruce 294 ECB : /* FALLTHROUGH */
9344 bruce 295 CBC 61174845 : case 1:
2004 andres 296 61174845 : oneHash = (cc_hashfunc[0]) (v1);
5832 tgl 297 61174845 : hashValue ^= oneHash;
9344 bruce 298 GBC 61174845 : break;
9344 bruce 299 UBC 0 : default:
7198 tgl 300 UIC 0 : elog(FATAL, "wrong number of hash keys: %d", nkeys);
301 : break;
302 : }
7707 tgl 303 ECB :
7707 tgl 304 GIC 61174845 : return hashValue;
305 : }
306 :
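/*
 * Illustrative sketch (hypothetical helper, guarded out so it is not
 * compiled): the switch above combines per-column hashes by rotating the
 * hash for key column i left by 8*(i-1) bits before XORing, so that equal
 * values appearing in different key columns do not cancel each other out.
 * For a two-key cache the combination is equivalent to:
 */
#if 0
static uint32
combine_two_key_hashes(uint32 h1, uint32 h2)
{
	uint32		hashValue = 0;

	hashValue ^= pg_rotate_left32(h2, 8);	/* second key column */
	hashValue ^= h1;						/* first key column, no rotation */

	return hashValue;
}
#endif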
307 : /*
308 : * CatalogCacheComputeTupleHashValue
309 : *
310 : * Compute the hash value associated with a given tuple to be cached
311 : */
7707 tgl 312 ECB : static uint32
2004 andres 313 GIC 5848877 : CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
9770 scrappy 314 ECB : {
2004 andres 315 CBC 5848877 : Datum v1 = 0,
316 5848877 : v2 = 0,
317 5848877 : v3 = 0,
318 5848877 : v4 = 0;
8469 tgl 319 5848877 : bool isNull = false;
2004 andres 320 5848877 : int *cc_keyno = cache->cc_keyno;
2004 andres 321 GIC 5848877 : TupleDesc cc_tupdesc = cache->cc_tupdesc;
322 :
8185 tgl 323 ECB : /* Now extract key fields from tuple, insert into scankey */
2004 andres 324 GIC 5848877 : switch (nkeys)
9345 bruce 325 ECB : {
9344 bruce 326 CBC 220815 : case 4:
1601 andres 327 220815 : v4 = fastgetattr(tuple,
1601 andres 328 GIC 220815 : cc_keyno[3],
329 : cc_tupdesc,
1601 andres 330 ECB : &isNull);
9344 bruce 331 GIC 220815 : Assert(!isNull);
332 : /* FALLTHROUGH */
9344 bruce 333 ECB : case 3:
1601 andres 334 CBC 1476669 : v3 = fastgetattr(tuple,
1601 andres 335 GIC 1476669 : cc_keyno[2],
336 : cc_tupdesc,
1601 andres 337 ECB : &isNull);
9344 bruce 338 GIC 1476669 : Assert(!isNull);
339 : /* FALLTHROUGH */
9344 bruce 340 ECB : case 2:
1601 andres 341 CBC 4230469 : v2 = fastgetattr(tuple,
1601 andres 342 GIC 4230469 : cc_keyno[1],
343 : cc_tupdesc,
1601 andres 344 ECB : &isNull);
9344 bruce 345 GIC 4230469 : Assert(!isNull);
346 : /* FALLTHROUGH */
9344 bruce 347 ECB : case 1:
1601 andres 348 GIC 5848877 : v1 = fastgetattr(tuple,
349 : cc_keyno[0],
350 : cc_tupdesc,
1601 andres 351 ECB : &isNull);
9344 bruce 352 CBC 5848877 : Assert(!isNull);
9344 bruce 353 GBC 5848877 : break;
9344 bruce 354 UBC 0 : default:
2004 andres 355 UIC 0 : elog(FATAL, "wrong number of hash keys: %d", nkeys);
356 : break;
357 : }
9345 bruce 358 ECB :
2004 andres 359 GIC 5848877 : return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
360 : }
361 :
362 : /*
363 : * CatalogCacheCompareTuple
364 : *
365 : * Compare a tuple to the passed arguments.
366 : */
2004 andres 367 ECB : static inline bool
2004 andres 368 GIC 50424716 : CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
369 : const Datum *cachekeys,
370 : const Datum *searchkeys)
2004 andres 371 ECB : {
2004 andres 372 GIC 50424716 : const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
373 : int i;
2004 andres 374 ECB :
2004 andres 375 GIC 115393789 : for (i = 0; i < nkeys; i++)
2004 andres 376 ECB : {
2004 andres 377 GBC 64969073 : if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
2004 andres 378 UIC 0 : return false;
2004 andres 379 ECB : }
2004 andres 380 GIC 50424716 : return true;
381 : }
382 :
383 :
384 : #ifdef CATCACHE_STATS
385 :
386 : static void
387 : CatCachePrintStats(int code, Datum arg)
388 : {
389 : slist_iter iter;
390 : long cc_searches = 0;
391 : long cc_hits = 0;
392 : long cc_neg_hits = 0;
393 : long cc_newloads = 0;
394 : long cc_invals = 0;
395 : long cc_lsearches = 0;
396 : long cc_lhits = 0;
397 :
398 : slist_foreach(iter, &CacheHdr->ch_caches)
399 : {
400 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
401 :
402 : if (cache->cc_ntup == 0 && cache->cc_searches == 0)
403 : continue; /* don't print unused caches */
404 : elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
405 : cache->cc_relname,
406 : cache->cc_indexoid,
407 : cache->cc_ntup,
408 : cache->cc_searches,
409 : cache->cc_hits,
410 : cache->cc_neg_hits,
411 : cache->cc_hits + cache->cc_neg_hits,
412 : cache->cc_newloads,
413 : cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
414 : cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
415 : cache->cc_invals,
416 : cache->cc_lsearches,
417 : cache->cc_lhits);
418 : cc_searches += cache->cc_searches;
419 : cc_hits += cache->cc_hits;
420 : cc_neg_hits += cache->cc_neg_hits;
421 : cc_newloads += cache->cc_newloads;
422 : cc_invals += cache->cc_invals;
423 : cc_lsearches += cache->cc_lsearches;
424 : cc_lhits += cache->cc_lhits;
425 : }
426 : elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
427 : CacheHdr->ch_ntup,
428 : cc_searches,
429 : cc_hits,
430 : cc_neg_hits,
431 : cc_hits + cc_neg_hits,
432 : cc_newloads,
433 : cc_searches - cc_hits - cc_neg_hits - cc_newloads,
434 : cc_searches - cc_hits - cc_neg_hits,
435 : cc_invals,
436 : cc_lsearches,
437 : cc_lhits);
438 : }
439 : #endif /* CATCACHE_STATS */
440 :
441 :
442 : /*
443 : * CatCacheRemoveCTup
444 : *
445 : * Unlink and delete the given cache entry
446 : *
447 : * NB: if it is a member of a CatCList, the CatCList is deleted too.
448 : * Both the cache entry and the list had better have zero refcount.
449 : */
9364 bruce 450 ECB : static void
8179 tgl 451 GIC 1617512 : CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
9770 scrappy 452 ECB : {
8179 tgl 453 CBC 1617512 : Assert(ct->refcount == 0);
7965 tgl 454 GIC 1617512 : Assert(ct->my_cache == cache);
8469 tgl 455 ECB :
7673 tgl 456 GIC 1617512 : if (ct->c_list)
457 : {
458 : /*
459 : * The cleanest way to handle this is to call CatCacheRemoveCList,
460 : * which will recurse back to me, and the recursive call will do the
461 : * work. Set the "dead" flag to make sure it does recurse.
6301 tgl 462 EUB : */
6301 tgl 463 UBC 0 : ct->dead = true;
7673 464 0 : CatCacheRemoveCList(cache, ct->c_list);
6301 tgl 465 UIC 0 : return; /* nothing left to do */
466 : }
467 :
3825 tgl 468 ECB : /* delink from linked list */
3825 tgl 469 GIC 1617512 : dlist_delete(&ct->cache_elem);
470 :
471 : /*
472 : * Free keys when we're dealing with a negative entry; normal entries just
473 : * point into the tuple, which is allocated together with the CatCTup.
2004 andres 474 ECB : */
2004 andres 475 CBC 1617512 : if (ct->negative)
476 859839 : CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
2004 andres 477 GIC 859839 : cache->cc_keyno, ct->keys);
2004 andres 478 ECB :
8469 tgl 479 GIC 1617512 : pfree(ct);
8469 tgl 480 ECB :
9345 bruce 481 CBC 1617512 : --cache->cc_ntup;
7965 tgl 482 GIC 1617512 : --CacheHdr->ch_ntup;
483 : }
484 :
485 : /*
486 : * CatCacheRemoveCList
487 : *
488 : * Unlink and delete the given cache list entry
489 : *
490 : * NB: any dead member entries that become unreferenced are deleted too.
491 : */
7673 tgl 492 ECB : static void
7673 tgl 493 GIC 136147 : CatCacheRemoveCList(CatCache *cache, CatCList *cl)
494 : {
495 : int i;
7673 tgl 496 ECB :
7673 tgl 497 CBC 136147 : Assert(cl->refcount == 0);
7673 tgl 498 GIC 136147 : Assert(cl->my_cache == cache);
499 :
7673 tgl 500 ECB : /* delink from member tuples */
7522 bruce 501 GIC 440365 : for (i = cl->n_members; --i >= 0;)
7673 tgl 502 ECB : {
7673 tgl 503 GIC 304218 : CatCTup *ct = cl->members[i];
7673 tgl 504 ECB :
7673 tgl 505 CBC 304218 : Assert(ct->c_list == cl);
7673 tgl 506 GIC 304218 : ct->c_list = NULL;
6301 tgl 507 ECB : /* if the member is dead and now has no references, remove it */
6301 tgl 508 GIC 304218 : if (
6301 tgl 509 ECB : #ifndef CATCACHE_FORCE_RELEASE
6301 tgl 510 GIC 304218 : ct->dead &&
6301 tgl 511 ECB : #endif
6301 tgl 512 CBC 72 : ct->refcount == 0)
6301 tgl 513 GIC 72 : CatCacheRemoveCTup(cache, ct);
514 : }
515 :
7673 tgl 516 ECB : /* delink from linked list */
3825 tgl 517 GIC 136147 : dlist_delete(&cl->cache_elem);
518 :
2004 andres 519 ECB : /* free associated column data */
2004 andres 520 CBC 136147 : CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
2004 andres 521 GIC 136147 : cache->cc_keyno, cl->keys);
2004 andres 522 ECB :
7673 tgl 523 CBC 136147 : pfree(cl);
7673 tgl 524 GIC 136147 : }
525 :
526 :
527 : /*
528 : * CatCacheInvalidate
529 : *
530 : * Invalidate entries in the specified cache, given a hash value.
531 : *
532 : * We delete cache entries that match the hash value, whether positive
533 : * or negative. We don't care whether the invalidation is the result
534 : * of a tuple insertion or a deletion.
535 : *
536 : * We used to try to match positive cache entries by TID, but that is
537 : * unsafe after a VACUUM FULL on a system catalog: an inval event could
538 : * be queued before VACUUM FULL, and then processed afterwards, when the
539 : * target tuple that has to be invalidated has a different TID than it
540 : * did when the event was created. So now we just compare hash values and
541 : * accept the small risk of unnecessary invalidations due to false matches.
542 : *
543 : * This routine is only quasi-public: it should only be used by inval.c.
544 : */
9770 scrappy 545 ECB : void
2158 tgl 546 GIC 14189209 : CatCacheInvalidate(CatCache *cache, uint32 hashValue)
547 : {
548 : Index hashIndex;
549 : dlist_mutable_iter iter;
550 :
551 : CACHE_elog(DEBUG2, "CatCacheInvalidate: called");
552 :
553 : /*
554 : * We don't bother to check whether the cache has finished initialization
555 : * yet; if not, there will be no entries in it so no problem.
556 : */
557 :
558 : /*
559 : * Invalidate *all* CatCLists in this cache; it's too hard to tell which
560 : * searches might still be correct, so just zap 'em all.
2158 tgl 561 ECB : */
2158 tgl 562 GIC 14323646 : dlist_foreach_modify(iter, &cache->cc_lists)
2158 tgl 563 ECB : {
2158 tgl 564 GIC 134437 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
7965 tgl 565 ECB :
2158 tgl 566 CBC 134437 : if (cl->refcount > 0)
2158 tgl 567 GIC 72 : cl->dead = true;
2158 tgl 568 ECB : else
2158 tgl 569 GIC 134365 : CatCacheRemoveCList(cache, cl);
570 : }
571 :
572 : /*
573 : * inspect the proper hash bucket for tuple matches
2158 tgl 574 ECB : */
2158 tgl 575 CBC 14189209 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
2158 tgl 576 GIC 22880366 : dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
2158 tgl 577 ECB : {
2158 tgl 578 GIC 8691157 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
7673 tgl 579 ECB :
2158 tgl 580 GIC 8691157 : if (hashValue == ct->hash_value)
9345 bruce 581 ECB : {
2158 tgl 582 CBC 1539453 : if (ct->refcount > 0 ||
2158 tgl 583 GIC 1538997 : (ct->c_list && ct->c_list->refcount > 0))
8179 tgl 584 ECB : {
2158 tgl 585 GIC 528 : ct->dead = true;
2158 tgl 586 ECB : /* list, if any, was marked dead above */
2158 tgl 587 GIC 528 : Assert(ct->c_list == NULL || ct->c_list->dead);
588 : }
2158 tgl 589 ECB : else
2158 tgl 590 GIC 1538925 : CatCacheRemoveCTup(cache, ct);
591 : CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
592 : #ifdef CATCACHE_STATS
593 : cache->cc_invals++;
594 : #endif
595 : /* could be multiple matches, so keep looking! */
596 : }
9770 scrappy 597 ECB : }
9770 scrappy 598 GIC 14189209 : }
599 :
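/*
 * Illustrative sketch (hypothetical wrapper, guarded out so it is not
 * compiled): the hash value passed to CatCacheInvalidate() is derived from
 * the catalog tuple being inserted, updated, or deleted.  Conceptually the
 * caller does something like the following; the real plumbing lives in
 * PrepareToInvalidateCacheTuple() later in this file, together with inval.c.
 */
#if 0
static void
invalidate_for_tuple_example(CatCache *cache, HeapTuple tuple)
{
	uint32		hashValue;

	/* hash the tuple's key columns the same way lookups do */
	hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, tuple);

	/* drop any cached entries (positive or negative) with that hash */
	CatCacheInvalidate(cache, hashValue);
}
#endif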
600 : /* ----------------------------------------------------------------
601 : * public functions
602 : * ----------------------------------------------------------------
603 : */
604 :
605 :
606 : /*
607 : * Standard routine for creating cache context if it doesn't exist yet
608 : *
609 : * There are a lot of places (probably far more than necessary) that check
610 : * whether CacheMemoryContext exists yet and want to create it if not.
611 : * We centralize knowledge of exactly how to create it here.
612 : */
7707 tgl 613 ECB : void
7707 tgl 614 GIC 11559 : CreateCacheMemoryContext(void)
615 : {
616 : /*
617 : * Purely for paranoia, check that context doesn't exist; caller probably
618 : * did so already.
7707 tgl 619 ECB : */
7707 tgl 620 CBC 11559 : if (!CacheMemoryContext)
7707 tgl 621 GIC 11559 : CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
622 : "CacheMemoryContext",
2416 tgl 623 ECB : ALLOCSET_DEFAULT_SIZES);
7707 tgl 624 GIC 11559 : }
625 :
626 :
627 : /*
628 : * ResetCatalogCache
629 : *
630 : * Reset one catalog cache to empty.
631 : *
632 : * This is not very efficient if the target cache is nearly empty.
633 : * However, it shouldn't need to be efficient; we don't invoke it often.
634 : */
7965 tgl 635 ECB : static void
7965 tgl 636 GIC 157542 : ResetCatalogCache(CatCache *cache)
637 : {
638 : dlist_mutable_iter iter;
639 : int i;
640 :
7673 tgl 641 ECB : /* Remove each list in this cache, or at least mark it dead */
3827 alvherre 642 GIC 159321 : dlist_foreach_modify(iter, &cache->cc_lists)
7673 tgl 643 ECB : {
3827 alvherre 644 GIC 1779 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
7673 tgl 645 ECB :
7673 tgl 646 GBC 1779 : if (cl->refcount > 0)
7673 tgl 647 UIC 0 : cl->dead = true;
7673 tgl 648 ECB : else
7673 tgl 649 GIC 1779 : CatCacheRemoveCList(cache, cl);
650 : }
651 :
7965 tgl 652 ECB : /* Remove each tuple in this cache, or at least mark it dead */
7704 bruce 653 GIC 4804384 : for (i = 0; i < cache->cc_nbuckets; i++)
9345 bruce 654 ECB : {
3827 alvherre 655 GIC 4646842 : dlist_head *bucket = &cache->cc_bucket[i];
9345 bruce 656 ECB :
3827 alvherre 657 GIC 4724904 : dlist_foreach_modify(iter, bucket)
3827 alvherre 658 ECB : {
3827 alvherre 659 GIC 78062 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
8281 tgl 660 ECB :
6448 tgl 661 CBC 78062 : if (ct->refcount > 0 ||
6448 tgl 662 GIC 78061 : (ct->c_list && ct->c_list->refcount > 0))
6448 tgl 663 ECB : {
8179 tgl 664 GIC 1 : ct->dead = true;
6448 tgl 665 ECB : /* list, if any, was marked dead above */
6448 tgl 666 GIC 1 : Assert(ct->c_list == NULL || ct->c_list->dead);
667 : }
8179 tgl 668 ECB : else
8179 tgl 669 GIC 78061 : CatCacheRemoveCTup(cache, ct);
670 : #ifdef CATCACHE_STATS
671 : cache->cc_invals++;
672 : #endif
673 : }
9770 scrappy 674 ECB : }
7965 tgl 675 GIC 157542 : }
676 :
677 : /*
678 : * ResetCatalogCaches
679 : *
680 : * Reset all caches when a shared cache inval event forces it
681 : */
7965 tgl 682 ECB : void
7965 tgl 683 GIC 1893 : ResetCatalogCaches(void)
684 : {
685 : slist_iter iter;
686 :
687 : CACHE_elog(DEBUG2, "ResetCatalogCaches called");
7965 tgl 688 ECB :
3827 alvherre 689 GIC 159012 : slist_foreach(iter, &CacheHdr->ch_caches)
3827 alvherre 690 ECB : {
3827 alvherre 691 GIC 157119 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
3827 alvherre 692 ECB :
7965 tgl 693 GIC 157119 : ResetCatalogCache(cache);
694 : }
695 :
1511 peter 696 ECB : CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
9770 scrappy 697 GIC 1893 : }
698 :
699 : /*
700 : * CatalogCacheFlushCatalog
701 : *
702 : * Flush all catcache entries that came from the specified system catalog.
703 : * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
704 : * tuples very likely now have different TIDs than before. (At one point
705 : * we also tried to force re-execution of CatalogCacheInitializeCache for
706 : * the cache(s) on that catalog. This is a bad idea since it leads to all
707 : * kinds of trouble if a cache flush occurs while loading cache entries.
708 : * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
709 : * rather than relying on the relcache to keep a tupdesc for us. Of course
710 : * this assumes the tupdesc of a cachable system table will not change...)
711 : */
4809 tgl 712 ECB : void
4809 tgl 713 GIC 333 : CatalogCacheFlushCatalog(Oid catId)
714 : {
715 : slist_iter iter;
716 :
717 : CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
4809 tgl 718 ECB :
3825 tgl 719 GIC 27972 : slist_foreach(iter, &CacheHdr->ch_caches)
4809 tgl 720 ECB : {
3827 alvherre 721 GIC 27639 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
722 :
4809 tgl 723 ECB : /* Does this cache store tuples of the target catalog? */
4254 tgl 724 GIC 27639 : if (cache->cc_reloid == catId)
725 : {
4809 tgl 726 ECB : /* Yes, so flush all its contents */
4809 tgl 727 GIC 423 : ResetCatalogCache(cache);
728 :
4809 tgl 729 ECB : /* Tell inval.c to call syscache callbacks for this cache */
4254 tgl 730 GIC 423 : CallSyscacheCallbacks(cache->id, 0);
731 : }
732 : }
733 :
1511 peter 734 ECB : CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
4809 tgl 735 GIC 333 : }
736 :
737 : /*
738 : * InitCatCache
739 : *
740 : * This allocates and initializes a cache for a system catalog relation.
741 : * Actually, the cache is only partially initialized to avoid opening the
742 : * relation. The relation will be opened and the rest of the cache
743 : * structure initialized on the first access.
744 : */
745 : #ifdef CACHEDEBUG
746 : #define InitCatCache_DEBUG2 \
747 : do { \
748 : elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
749 : cp->cc_reloid, cp->cc_indexoid, cp->id, \
750 : cp->cc_nkeys, cp->cc_nbuckets); \
751 : } while(0)
752 : #else
753 : #define InitCatCache_DEBUG2
754 : #endif
755 :
7965 tgl 756 ECB : CatCache *
8179 tgl 757 GIC 959397 : InitCatCache(int id,
758 : Oid reloid,
759 : Oid indexoid,
760 : int nkeys,
761 : const int *key,
762 : int nbuckets)
763 : {
764 : CatCache *cp;
765 : MemoryContext oldcxt;
766 : int i;
767 :
768 : /*
769 : * nbuckets is the initial number of hash buckets to use in this catcache.
770 : * It will be enlarged later if it becomes too full.
771 : *
772 : * nbuckets must be a power of two. We check this via Assert rather than
773 : * a full runtime check because the values will be coming from constant
774 : * tables.
775 : *
776 : * If you're confused by the power-of-two check, see comments in
6142 tgl 777 ECB : * bitmapset.c for an explanation.
778 : */
6142 tgl 779 GIC 959397 : Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
780 :
781 : /*
782 : * first switch to the cache context so our allocations do not vanish at
6385 bruce 783 ECB : * the end of a transaction
9345 bruce 784 EUB : */
8320 tgl 785 GIC 959397 : if (!CacheMemoryContext)
8320 tgl 786 LBC 0 : CreateCacheMemoryContext();
787 :
8320 tgl 788 GIC 959397 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
789 :
790 : /*
6142 tgl 791 ECB : * if first time through, initialize the cache group header
792 : */
7965 tgl 793 CBC 959397 : if (CacheHdr == NULL)
7965 tgl 794 ECB : {
7965 tgl 795 CBC 11559 : CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
3827 alvherre 796 GIC 11559 : slist_init(&CacheHdr->ch_caches);
7965 tgl 797 11559 : CacheHdr->ch_ntup = 0;
798 : #ifdef CATCACHE_STATS
799 : /* set up to dump stats at backend exit */
800 : on_proc_exit(CatCachePrintStats, 0);
801 : #endif
802 : }
803 :
804 : /*
805 : * Allocate a new cache structure, aligning to a cacheline boundary
806 : *
3825 tgl 807 ECB : * Note: we rely on zeroing to initialize all the dlist headers correctly
808 : */
108 drowley 809 GNC 959397 : cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
810 : MCXT_ALLOC_ZERO);
3503 heikki.linnakangas 811 GIC 959397 : cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
812 :
813 : /*
814 : * initialize the cache's relation information for the relation
815 : * corresponding to this cache, and initialize some of the new cache's
8053 bruce 816 ECB : * other internal fields. But don't open the relation yet.
9345 817 : */
7965 tgl 818 CBC 959397 : cp->id = id;
6569 819 959397 : cp->cc_relname = "(not known yet)";
820 959397 : cp->cc_reloid = reloid;
821 959397 : cp->cc_indexoid = indexoid;
7836 bruce 822 959397 : cp->cc_relisshared = false; /* temporary */
9345 823 959397 : cp->cc_tupdesc = (TupleDesc) NULL;
7965 tgl 824 959397 : cp->cc_ntup = 0;
6142 825 959397 : cp->cc_nbuckets = nbuckets;
9345 bruce 826 959397 : cp->cc_nkeys = nkeys;
9345 bruce 827 GIC 2519862 : for (i = 0; i < nkeys; ++i)
2004 andres 828 1560465 : cp->cc_keyno[i] = key[i];
829 :
830 : /*
831 : * new cache is initialized as far as we can go for now. print some
832 : * debugging information, if appropriate.
833 : */
834 : InitCatCache_DEBUG2;
835 :
836 : /*
7965 tgl 837 ECB : * add completed cache to top of group header's list
838 : */
3827 alvherre 839 GIC 959397 : slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
840 :
841 : /*
8053 bruce 842 ECB : * back to the old context before we return...
843 : */
9345 bruce 844 CBC 959397 : MemoryContextSwitchTo(oldcxt);
845 :
8986 bruce 846 GIC 959397 : return cp;
847 : }
848 :
849 : /*
850 : * Enlarge a catcache, doubling the number of buckets.
3503 heikki.linnakangas 851 ECB : */
852 : static void
3503 heikki.linnakangas 853 GIC 10362 : RehashCatCache(CatCache *cp)
854 : {
855 : dlist_head *newbucket;
856 : int newnbuckets;
3503 heikki.linnakangas 857 ECB : int i;
858 :
3503 heikki.linnakangas 859 GIC 10362 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
860 : cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
3503 heikki.linnakangas 861 ECB :
862 : /* Allocate a new, larger, hash table. */
3503 heikki.linnakangas 863 GIC 10362 : newnbuckets = cp->cc_nbuckets * 2;
864 10362 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
3503 heikki.linnakangas 865 ECB :
866 : /* Move all entries from old hash table to new. */
3503 heikki.linnakangas 867 GIC 962886 : for (i = 0; i < cp->cc_nbuckets; i++)
868 : {
3503 heikki.linnakangas 869 ECB : dlist_mutable_iter iter;
870 :
3503 heikki.linnakangas 871 CBC 2867934 : dlist_foreach_modify(iter, &cp->cc_bucket[i])
3503 heikki.linnakangas 872 ECB : {
3260 bruce 873 GIC 1915410 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
3503 heikki.linnakangas 874 CBC 1915410 : int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
3503 heikki.linnakangas 875 ECB :
3503 heikki.linnakangas 876 GIC 1915410 : dlist_delete(iter.cur);
877 1915410 : dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
878 : }
879 : }
3503 heikki.linnakangas 880 ECB :
881 : /* Switch to the new array. */
3503 heikki.linnakangas 882 CBC 10362 : pfree(cp->cc_bucket);
883 10362 : cp->cc_nbuckets = newnbuckets;
3503 heikki.linnakangas 884 GIC 10362 : cp->cc_bucket = newbucket;
885 10362 : }
886 :
887 : /*
888 : * CatalogCacheInitializeCache
889 : *
890 : * This function does final initialization of a catcache: obtain the tuple
891 : * descriptor and set up the hash and equality function links. We assume
892 : * that the relcache entry can be opened at this point!
893 : */
894 : #ifdef CACHEDEBUG
895 : #define CatalogCacheInitializeCache_DEBUG1 \
896 : elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
897 : cache->cc_reloid)
898 :
899 : #define CatalogCacheInitializeCache_DEBUG2 \
900 : do { \
901 : if (cache->cc_keyno[i] > 0) { \
902 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
903 : i+1, cache->cc_nkeys, cache->cc_keyno[i], \
904 : TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
905 : } else { \
906 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
907 : i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
908 : } \
909 : } while(0)
910 : #else
911 : #define CatalogCacheInitializeCache_DEBUG1
912 : #define CatalogCacheInitializeCache_DEBUG2
913 : #endif
7707 tgl 914 ECB :
915 : static void
7707 tgl 916 GIC 263172 : CatalogCacheInitializeCache(CatCache *cache)
917 : {
918 : Relation relation;
919 : MemoryContext oldcxt;
920 : TupleDesc tupdesc;
921 : int i;
922 :
6569 tgl 923 ECB : CatalogCacheInitializeCache_DEBUG1;
924 :
1539 andres 925 GIC 263172 : relation = table_open(cache->cc_reloid, AccessShareLock);
926 :
927 : /*
928 : * switch to the cache context so our allocations do not vanish at the end
6385 bruce 929 ECB : * of a transaction
930 : */
7707 tgl 931 CBC 263170 : Assert(CacheMemoryContext != NULL);
932 :
7707 tgl 933 GIC 263170 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
934 :
935 : /*
7707 tgl 936 ECB : * copy the relcache's tuple descriptor to permanent cache storage
937 : */
7707 tgl 938 GIC 263170 : tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
939 :
940 : /*
941 : * save the relation's name and relisshared flag, too (cc_relname is used
6385 bruce 942 ECB : * only for debugging purposes)
7707 tgl 943 : */
6569 tgl 944 GIC 263170 : cache->cc_relname = pstrdup(RelationGetRelationName(relation));
7707 945 263170 : cache->cc_relisshared = RelationGetForm(relation)->relisshared;
946 :
947 : /*
7707 tgl 948 ECB : * return to the caller's memory context and close the rel
949 : */
7707 tgl 950 CBC 263170 : MemoryContextSwitchTo(oldcxt);
951 :
1539 andres 952 GIC 263170 : table_close(relation, AccessShareLock);
953 :
954 : CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
955 : cache->cc_relname, cache->cc_nkeys);
956 :
957 : /*
7707 tgl 958 ECB : * initialize cache's key information
959 : */
7707 tgl 960 GIC 684937 : for (i = 0; i < cache->cc_nkeys; ++i)
961 : {
962 : Oid keytype;
963 : RegProcedure eqfunc;
964 :
7707 tgl 965 ECB : CatalogCacheInitializeCache_DEBUG2;
966 :
2004 andres 967 CBC 421767 : if (cache->cc_keyno[i] > 0)
968 : {
2058 andres 969 GIC 421767 : Form_pg_attribute attr = TupleDescAttr(tupdesc,
2004 andres 970 ECB : cache->cc_keyno[i] - 1);
971 :
3215 tgl 972 CBC 421767 : keytype = attr->atttypid;
973 : /* cache key columns should always be NOT NULL */
3215 tgl 974 GIC 421767 : Assert(attr->attnotnull);
975 : }
7707 tgl 976 EUB : else
977 : {
1601 andres 978 UBC 0 : if (cache->cc_keyno[i] < 0)
1601 andres 979 UIC 0 : elog(FATAL, "sys attributes are not supported in caches");
7707 tgl 980 0 : keytype = OIDOID;
7707 tgl 981 ECB : }
982 :
7231 tgl 983 GIC 421767 : GetCCHashEqFuncs(keytype,
984 : &cache->cc_hashfunc[i],
985 : &eqfunc,
986 : &cache->cc_fastequal[i]);
987 :
988 : /*
989 : * Do equality-function lookup (we assume this won't need a catalog
6385 bruce 990 ECB : * lookup for any supported type)
991 : */
7091 tgl 992 GIC 421767 : fmgr_info_cxt(eqfunc,
993 : &cache->cc_skey[i].sk_func,
994 : CacheMemoryContext);
7707 tgl 995 ECB :
996 : /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
2004 andres 997 GIC 421767 : cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
7707 tgl 998 ECB :
7088 999 : /* Fill in sk_strategy as well --- always standard equality */
7091 tgl 1000 GIC 421767 : cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
7088 tgl 1001 CBC 421767 : cache->cc_skey[i].sk_subtype = InvalidOid;
1002 : /* If a catcache key requires a collation, it must be C collation */
1573 tgl 1003 GIC 421767 : cache->cc_skey[i].sk_collation = C_COLLATION_OID;
1004 :
1005 : CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
1006 : cache->cc_relname, i, cache);
1007 : }
1008 :
1009 : /*
7707 tgl 1010 ECB : * mark this cache fully initialized
1011 : */
7707 tgl 1012 GIC 263170 : cache->cc_tupdesc = tupdesc;
1013 263170 : }
1014 :
1015 : /*
1016 : * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1017 : *
1018 : * One reason to call this routine is to ensure that the relcache has
1019 : * created entries for all the catalogs and indexes referenced by catcaches.
1020 : * Therefore, provide an option to open the index as well as fixing the
1021 : * cache itself. An exception is the indexes on pg_am, which we don't use
1022 : * (cf. IndexScanOK).
7707 tgl 1023 ECB : */
1024 : void
6029 tgl 1025 CBC 112322 : InitCatCachePhase2(CatCache *cache, bool touch_index)
7707 tgl 1026 ECB : {
7707 tgl 1027 GIC 112322 : if (cache->cc_tupdesc == NULL)
7707 tgl 1028 CBC 104387 : CatalogCacheInitializeCache(cache);
7707 tgl 1029 ECB :
6029 tgl 1030 CBC 112320 : if (touch_index &&
6029 tgl 1031 GIC 101637 : cache->id != AMOID &&
7707 1032 100412 : cache->id != AMNAME)
1033 : {
1034 : Relation idesc;
1035 :
1036 : /*
1037 : * We must lock the underlying catalog before opening the index to
1038 : * avoid deadlock, since index_open could possibly result in reading
1039 : * this same catalog, and if anyone else is exclusive-locking this
4401 tgl 1040 ECB : * catalog and index they'll be doing it in that order.
1041 : */
4401 tgl 1042 GIC 99187 : LockRelationOid(cache->cc_reloid, AccessShareLock);
6096 1043 99187 : idesc = index_open(cache->cc_indexoid, AccessShareLock);
1044 :
1045 : /*
1046 : * While we've got the index open, let's check that it's unique (and
1047 : * not just deferrable-unique, thank you very much). This is just to
1048 : * catch thinkos in definitions of new catcaches, so we don't worry
3215 tgl 1049 ECB : * about the pg_am indexes not getting tested.
1050 : */
3215 tgl 1051 GIC 99187 : Assert(idesc->rd_index->indisunique &&
3215 tgl 1052 ECB : idesc->rd_index->indimmediate);
1053 :
6096 tgl 1054 GIC 99187 : index_close(idesc, AccessShareLock);
4401 tgl 1055 CBC 99187 : UnlockRelationOid(cache->cc_reloid, AccessShareLock);
1056 : }
7707 tgl 1057 GIC 112320 : }
1058 :
1059 :
1060 : /*
1061 : * IndexScanOK
1062 : *
1063 : * This function checks for tuples that will be fetched by
1064 : * IndexSupportInitialize() during relcache initialization for
1065 : * certain system indexes that support critical syscaches.
1066 : * We can't use an indexscan to fetch these, else we'll get into
1067 : * infinite recursion. A plain heap scan will work, however.
1068 : * Once we have completed relcache initialization (signaled by
1069 : * criticalRelcachesBuilt), we don't have to worry anymore.
1070 : *
1071 : * Similarly, during backend startup we have to be able to use the
1072 : * pg_authid, pg_auth_members and pg_database syscaches for
1073 : * authentication even if we don't yet have relcache entries for those
1074 : * catalogs' indexes.
8560 bruce 1075 ECB : */
1076 : static bool
8185 tgl 1077 CBC 4793859 : IndexScanOK(CatCache *cache, ScanKey cur_skey)
1078 : {
4737 1079 4793859 : switch (cache->id)
1080 : {
4737 tgl 1081 GIC 286719 : case INDEXRELID:
1082 :
1083 : /*
1084 : * Rather than tracking exactly which indexes have to be loaded
1085 : * before we can use indexscans (which changes from time to time),
1086 : * just force all pg_index searches to be heap scans until we've
4737 tgl 1087 ECB : * built the critical relcaches.
1088 : */
4737 tgl 1089 CBC 286719 : if (!criticalRelcachesBuilt)
4737 tgl 1090 GIC 57165 : return false;
4737 tgl 1091 CBC 229554 : break;
1092 :
4737 tgl 1093 GIC 22868 : case AMOID:
1094 : case AMNAME:
1095 :
1096 : /*
1097 : * Always do heap scans in pg_am, because it's so small there's
1098 : * not much point in an indexscan anyway. We *must* do this when
1099 : * initially building critical relcache entries, but we might as
4737 tgl 1100 ECB : * well just always do it.
1101 : */
8185 tgl 1102 CBC 22868 : return false;
1103 :
4737 tgl 1104 GIC 44159 : case AUTHNAME:
1105 : case AUTHOID:
1106 : case AUTHMEMMEMROLE:
1107 : case DATABASEOID:
1108 :
1109 : /*
1110 : * Protect authentication lookups occurring before relcache has
4737 tgl 1111 ECB : * collected entries for shared indexes.
1112 : */
4737 tgl 1113 CBC 44159 : if (!criticalSharedRelcachesBuilt)
4737 tgl 1114 GIC 1618 : return false;
4737 tgl 1115 CBC 42541 : break;
4737 tgl 1116 ECB :
4737 tgl 1117 GIC 4440113 : default:
1118 4440113 : break;
1119 : }
8560 bruce 1120 ECB :
1121 : /* Normal case, allow index scan */
8185 tgl 1122 GIC 4712208 : return true;
1123 : }
1124 :
1125 : /*
1126 : * SearchCatCache
1127 : *
1128 : * This call searches a system cache for a tuple, opening the relation
1129 : * if necessary (on the first access to a particular cache).
1130 : *
1131 : * The result is NULL if not found, or a pointer to a HeapTuple in
1132 : * the cache. The caller must not modify the tuple, and must call
1133 : * ReleaseCatCache() when done with it.
1134 : *
1135 : * The search key values should be expressed as Datums of the key columns'
1136 : * datatype(s). (Pass zeroes for any unused parameters.) As a special
1137 : * exception, the passed-in key for a NAME column can be just a C string;
1138 : * the caller need not go to the trouble of converting it to a fully
1139 : * null-padded NAME.
9770 scrappy 1140 ECB : */
1141 : HeapTuple
8179 tgl 1142 GIC 4844245 : SearchCatCache(CatCache *cache,
1143 : Datum v1,
1144 : Datum v2,
1145 : Datum v3,
9345 bruce 1146 ECB : Datum v4)
1147 : {
2004 andres 1148 GIC 4844245 : return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1149 : }
1150 :
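/*
 * Illustrative sketch (hypothetical caller, guarded out so it is not
 * compiled): the typical lookup/release pattern for the API described above,
 * assuming a one-key catcache keyed by an OID column.  "my_cache" and
 * "some_oid" are invented names; real callers normally go through
 * syscache.c's SearchSysCache() wrappers rather than using CatCache
 * pointers directly.
 */
#if 0
static bool
lookup_example(CatCache *my_cache, Oid some_oid)
{
	HeapTuple	tup;
	bool		found = false;

	tup = SearchCatCache(my_cache, ObjectIdGetDatum(some_oid), 0, 0, 0);
	if (HeapTupleIsValid(tup))
	{
		/* ... inspect the tuple here; it must not be modified ... */
		found = true;

		/* release the reference acquired by the successful search */
		ReleaseCatCache(tup);
	}
	return found;
}
#endif
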
1151 :
1152 : /*
1153 : * The SearchCatCacheN() functions are SearchCatCache() variants for a specific
1154 : * number of arguments. The compiler can inline the body and unroll loops,
1155 : * making them a bit faster than SearchCatCache().
1156 : */
2004 andres 1157 ECB :
1158 : HeapTuple
2004 andres 1159 GIC 40407613 : SearchCatCache1(CatCache *cache,
2004 andres 1160 ECB : Datum v1)
1161 : {
2004 andres 1162 GIC 40407613 : return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1163 : }
1164 :
2004 andres 1165 ECB :
1166 : HeapTuple
2004 andres 1167 GIC 4343986 : SearchCatCache2(CatCache *cache,
2004 andres 1168 ECB : Datum v1, Datum v2)
1169 : {
2004 andres 1170 GIC 4343986 : return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1171 : }
1172 :
2004 andres 1173 ECB :
1174 : HeapTuple
2004 andres 1175 GIC 2145114 : SearchCatCache3(CatCache *cache,
2004 andres 1176 ECB : Datum v1, Datum v2, Datum v3)
1177 : {
2004 andres 1178 GIC 2145114 : return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1179 : }
1180 :
2004 andres 1181 ECB :
1182 : HeapTuple
2004 andres 1183 GIC 1883086 : SearchCatCache4(CatCache *cache,
2004 andres 1184 ECB : Datum v1, Datum v2, Datum v3, Datum v4)
1185 : {
2004 andres 1186 GIC 1883086 : return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1187 : }
1188 :
1189 : /*
1190 : * Work-horse for SearchCatCache/SearchCatCacheN.
2004 andres 1191 ECB : */
1192 : static inline HeapTuple
2004 andres 1193 GIC 53624044 : SearchCatCacheInternal(CatCache *cache,
1194 : int nkeys,
1195 : Datum v1,
1196 : Datum v2,
1197 : Datum v3,
1198 : Datum v4)
1199 : {
1200 : Datum arguments[CATCACHE_MAXKEYS];
1201 : uint32 hashValue;
1202 : Index hashIndex;
1203 : dlist_iter iter;
1204 : dlist_head *bucket;
1205 : CatCTup *ct;
9345 bruce 1206 ECB :
1207 : /* Make sure we're in an xact, even if this ends up being a cache hit */
3555 rhaas 1208 CBC 53624044 : Assert(IsTransactionState());
1209 :
2004 andres 1210 GIC 53624044 : Assert(cache->cc_nkeys == nkeys);
1211 :
1212 : /*
7901 tgl 1213 ECB : * one-time startup overhead for each cache
9345 bruce 1214 : */
2004 andres 1215 GIC 53624044 : if (unlikely(cache->cc_tupdesc == NULL))
8185 tgl 1216 133839 : CatalogCacheInitializeCache(cache);
1217 :
1218 : #ifdef CATCACHE_STATS
1219 : cache->cc_searches++;
1220 : #endif
7719 tgl 1221 ECB :
2004 andres 1222 : /* Initialize local parameter array */
2004 andres 1223 CBC 53624044 : arguments[0] = v1;
1224 53624044 : arguments[1] = v2;
2004 andres 1225 GIC 53624044 : arguments[2] = v3;
1226 53624044 : arguments[3] = v4;
1227 :
1228 : /*
8053 bruce 1229 ECB : * find the hash bucket in which to look for the tuple
9345 1230 : */
2004 andres 1231 GIC 53624044 : hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
7704 bruce 1232 53624044 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1233 :
1234 : /*
1235 : * scan the hash bucket until we find a match or exhaust our tuples
1236 : *
1237 : * Note: it's okay to use dlist_foreach here, even though we modify the
3825 tgl 1238 ECB : * dlist within the loop, because we don't continue the loop afterwards.
9345 bruce 1239 : */
3827 alvherre 1240 GIC 53624044 : bucket = &cache->cc_bucket[hashIndex];
3825 tgl 1241 CBC 59159533 : dlist_foreach(iter, bucket)
1242 : {
3827 alvherre 1243 54587566 : ct = dlist_container(CatCTup, cache_elem, iter.cur);
8179 tgl 1244 EUB :
8179 tgl 1245 GIC 54587566 : if (ct->dead)
8179 tgl 1246 LBC 0 : continue; /* ignore dead entries */
8179 tgl 1247 ECB :
7707 tgl 1248 GIC 54587566 : if (ct->hash_value != hashValue)
7707 tgl 1249 CBC 5535489 : continue; /* quickly skip entry if wrong hash val */
7707 tgl 1250 EUB :
2004 andres 1251 GIC 49052077 : if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
8179 tgl 1252 UIC 0 : continue;
1253 :
1254 : /*
1255 : * We found a match in the cache. Move it to the front of the list
1256 : * for its hashbucket, in order to speed subsequent searches. (The
1257 : * most frequently accessed elements in any hashbucket will tend to be
6142 tgl 1258 ECB : * near the front of the hashbucket's list.)
1259 : */
3827 alvherre 1260 GIC 49052077 : dlist_move_head(bucket, &ct->cache_elem);
1261 :
1262 : /*
1263 : * If it's a positive entry, bump its refcount and return it. If it's
6385 bruce 1264 ECB : * negative, we can report failure to the caller.
1265 : */
7707 tgl 1266 CBC 49052077 : if (!ct->negative)
7707 tgl 1267 ECB : {
6840 tgl 1268 CBC 45626211 : ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
7707 tgl 1269 GIC 45626211 : ct->refcount++;
6840 1270 45626211 : ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1271 :
1272 : CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1273 : cache->cc_relname, hashIndex);
1274 :
1275 : #ifdef CATCACHE_STATS
1276 : cache->cc_hits++;
7719 tgl 1277 ECB : #endif
1278 :
7707 tgl 1279 GIC 45626211 : return &ct->tuple;
1280 : }
1281 : else
1282 : {
1283 : CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1284 : cache->cc_relname, hashIndex);
1285 :
1286 : #ifdef CATCACHE_STATS
1287 : cache->cc_neg_hits++;
7707 tgl 1288 ECB : #endif
1289 :
7707 tgl 1290 GIC 3425866 : return NULL;
1291 : }
9345 bruce 1292 ECB : }
1293 :
2004 andres 1294 GIC 4571967 : return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
1295 : }
1296 :
1297 : /*
1298 : * Search the actual catalogs, rather than the cache.
1299 : *
1300 : * This is kept separate from SearchCatCacheInternal() to keep the fast-path
1301 : * as small as possible. To avoid that effort being undone by a helpful
1302 : * compiler, try to explicitly forbid inlining.
2004 andres 1303 ECB : */
1304 : static pg_noinline HeapTuple
2004 andres 1305 GIC 4571967 : SearchCatCacheMiss(CatCache *cache,
1306 : int nkeys,
1307 : uint32 hashValue,
1308 : Index hashIndex,
1309 : Datum v1,
1310 : Datum v2,
1311 : Datum v3,
1312 : Datum v4)
1313 : {
1314 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1315 : Relation relation;
1316 : SysScanDesc scandesc;
1317 : HeapTuple ntp;
1318 : CatCTup *ct;
1319 : Datum arguments[CATCACHE_MAXKEYS];
2004 andres 1320 ECB :
1321 : /* Initialize local parameter array */
2004 andres 1322 CBC 4571967 : arguments[0] = v1;
1323 4571967 : arguments[1] = v2;
2004 andres 1324 GIC 4571967 : arguments[2] = v3;
1325 4571967 : arguments[3] = v4;
1326 :
1327 : /*
1328 : * Ok, need to make a lookup in the relation, copy the scankey and fill
2004 andres 1329 ECB : * out any per-call fields.
1330 : */
2004 andres 1331 CBC 4571967 : memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
1332 4571967 : cur_skey[0].sk_argument = v1;
1333 4571967 : cur_skey[1].sk_argument = v2;
2004 andres 1334 GIC 4571967 : cur_skey[2].sk_argument = v3;
1335 4571967 : cur_skey[3].sk_argument = v4;
1336 :
1337 : /*
1338 : * Tuple was not found in cache, so we have to try to retrieve it directly
1339 : * from the relation. If found, we will add it to the cache; if not
1340 : * found, we will add a negative cache entry instead.
1341 : *
1342 : * NOTE: it is possible for recursive cache lookups to occur while reading
1343 : * the relation --- for example, due to shared-cache-inval messages being
1344 : * processed during table_open(). This is OK. It's even possible for one
1345 : * of those lookups to find and enter the very same tuple we are trying to
1346 : * fetch here. If that happens, we will enter a second copy of the tuple
1347 : * into the cache. The first copy will never be referenced again, and
1348 : * will eventually age out of the cache, so there's no functional problem.
1349 : * This case is rare enough that it's not worth expending extra cycles to
6385 bruce 1350 ECB : * detect.
1351 : */
1539 andres 1352 CBC 4571967 : relation = table_open(cache->cc_reloid, AccessShareLock);
1353 :
7673 tgl 1354 4571967 : scandesc = systable_beginscan(relation,
1355 : cache->cc_indexoid,
7673 tgl 1356 GIC 4571967 : IndexScanOK(cache, cur_skey),
1357 : NULL,
1358 : nkeys,
7673 tgl 1359 ECB : cur_skey);
1360 :
7673 tgl 1361 CBC 4571967 : ct = NULL;
1362 :
1363 4571967 : while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1364 : {
2004 andres 1365 GIC 2402821 : ct = CatalogCacheCreateEntry(cache, ntp, arguments,
1366 : hashValue, hashIndex,
7673 tgl 1367 ECB : false);
6840 1368 : /* immediately set the refcount to 1 */
6840 tgl 1369 CBC 2402821 : ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1370 2402821 : ct->refcount++;
6840 tgl 1371 GIC 2402821 : ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
7673 1372 2402821 : break; /* assume only one match */
7673 tgl 1373 ECB : }
1374 :
7673 tgl 1375 CBC 4571966 : systable_endscan(scandesc);
1376 :
1539 andres 1377 GIC 4571966 : table_close(relation, AccessShareLock);
1378 :
1379 : /*
1380 : * If tuple was not found, we need to build a negative cache entry
1381 : * containing a fake tuple. The fake tuple has the correct key columns,
1382 : * but nulls everywhere else.
1383 : *
1384 : * In bootstrap mode, we don't build negative entries, because the cache
1385 : * invalidation mechanism isn't alive and can't clear them if the tuple
1386 : * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
6385 bruce 1387 ECB : * cache inval for that.)
1388 : */
7673 tgl 1389 CBC 4571966 : if (ct == NULL)
7673 tgl 1390 ECB : {
6569 tgl 1391 GIC 2169145 : if (IsBootstrapProcessingMode())
6569 tgl 1392 CBC 165615 : return NULL;
1393 :
2004 andres 1394 GIC 2003530 : ct = CatalogCacheCreateEntry(cache, NULL, arguments,
1395 : hashValue, hashIndex,
1396 : true);
1397 :
1398 : CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1399 : cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1400 : CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1401 : cache->cc_relname, hashIndex);
1402 :
1403 : /*
1404 : * We are not returning the negative entry to the caller, so leave its
1405 : * refcount zero.
7673 tgl 1406 ECB : */
1407 :
7673 tgl 1408 GIC 2003530 : return NULL;
1409 : }
1410 :
1411 : CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1412 : cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1413 : CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1414 : cache->cc_relname, hashIndex);
1415 :
1416 : #ifdef CATCACHE_STATS
1417 : cache->cc_newloads++;
7673 tgl 1418 ECB : #endif
1419 :
7673 tgl 1420 GIC 2402821 : return &ct->tuple;
1421 : }
1422 :
1423 : /*
1424 : * ReleaseCatCache
1425 : *
1426 : * Decrement the reference count of a catcache entry (releasing the
1427 : * hold grabbed by a successful SearchCatCache).
1428 : *
1429 : * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1430 : * will be freed as soon as their refcount goes to zero. In combination
1431 : * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1432 : * to catch references to already-released catcache entries.
7673 tgl 1433 ECB : */
1434 : void
7673 tgl 1435 CBC 48029032 : ReleaseCatCache(HeapTuple tuple)
1436 : {
7673 tgl 1437 GIC 48029032 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
1438 : offsetof(CatCTup, tuple));
7673 tgl 1439 ECB :
1440 : /* Safety checks to ensure we were handed a cache entry */
7673 tgl 1441 GIC 48029032 : Assert(ct->ct_magic == CT_MAGIC);
7673 tgl 1442 CBC 48029032 : Assert(ct->refcount > 0);
7673 tgl 1443 ECB :
7673 tgl 1444 GIC 48029032 : ct->refcount--;
6840 tgl 1445 CBC 48029032 : ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1446 :
6448 1447 48029032 : if (
1448 : #ifndef CATCACHE_FORCE_RELEASE
1449 48029032 : ct->dead &&
7673 tgl 1450 ECB : #endif
6448 tgl 1451 CBC 510 : ct->refcount == 0 &&
1452 454 : (ct->c_list == NULL || ct->c_list->refcount == 0))
7673 tgl 1453 GIC 454 : CatCacheRemoveCTup(ct->my_cache, ct);
1454 48029032 : }
1455 :
1456 :
1457 : /*
1458 : * GetCatCacheHashValue
1459 : *
1460 : * Compute the hash value for a given set of search keys.
1461 : *
1462 : * The reason for exposing this as part of the API is that the hash value is
1463 : * exposed in cache invalidation operations, so there are places outside the
1464 : * catcache code that need to be able to compute the hash values.
4050 tgl 1465 ECB : */
1466 : uint32
4050 tgl 1467 GIC 107393 : GetCatCacheHashValue(CatCache *cache,
1468 : Datum v1,
1469 : Datum v2,
1470 : Datum v3,
1471 : Datum v4)
1472 : {
1473 : /*
4050 tgl 1474 ECB : * one-time startup overhead for each cache
1475 : */
4050 tgl 1476 GIC 107393 : if (cache->cc_tupdesc == NULL)
1477 8976 : CatalogCacheInitializeCache(cache);
1478 :
1479 : /*
4050 tgl 1480 ECB : * calculate the hash value
1481 : */
2004 andres 1482 GIC 107393 : return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1483 : }
1484 :
1485 :
1486 : /*
1487 : * SearchCatCacheList
1488 : *
1489 : * Generate a list of all tuples matching a partial key (that is,
1490 : * a key specifying just the first K of the cache's N key columns).
1491 : *
1492 : * It doesn't make any sense to specify all of the cache's key columns
1493 : * here: since the key is unique, there could be at most one match, so
1494 : * you ought to use SearchCatCache() instead. Hence this function takes
1495 : * one fewer Datum argument than SearchCatCache() does.
1496 : *
1497 : * The caller must not modify the list object or the pointed-to tuples,
1498 : * and must call ReleaseCatCacheList() when done with the list.
7673 tgl 1499 ECB : */
1500 : CatCList *
7673 tgl 1501 GIC 1594531 : SearchCatCacheList(CatCache *cache,
1502 : int nkeys,
1503 : Datum v1,
1504 : Datum v2,
1896 tgl 1505 ECB : Datum v3)
1506 : {
1896 tgl 1507 GIC 1594531 : Datum v4 = 0; /* dummy last-column value */
1508 : Datum arguments[CATCACHE_MAXKEYS];
1509 : uint32 lHashValue;
1510 : dlist_iter iter;
1511 : CatCList *cl;
1512 : CatCTup *ct;
1513 : List *volatile ctlist;
1514 : ListCell *ctlist_item;
1515 : int nmembers;
1516 : bool ordered;
1517 : HeapTuple ntp;
1518 : MemoryContext oldcxt;
1519 : int i;
1520 :
1521 : /*
7673 tgl 1522 ECB : * one-time startup overhead for each cache
1523 : */
7673 tgl 1524 GIC 1594531 : if (cache->cc_tupdesc == NULL)
7673 tgl 1525 CBC 12482 : CatalogCacheInitializeCache(cache);
1526 :
7673 tgl 1527 GIC 1594531 : Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1528 :
1529 : #ifdef CATCACHE_STATS
1530 : cache->cc_lsearches++;
1531 : #endif
7673 tgl 1532 ECB :
2004 andres 1533 : /* Initialize local parameter array */
2004 andres 1534 CBC 1594531 : arguments[0] = v1;
1535 1594531 : arguments[1] = v2;
2004 andres 1536 GIC 1594531 : arguments[2] = v3;
1537 1594531 : arguments[3] = v4;
1538 :
1539 : /*
1540 : * compute a hash value of the given keys for faster search. We don't
1541 : * presently divide the CatCList items into buckets, but this still lets
6385 bruce 1542 ECB : * us skip non-matching items quickly most of the time.
1543 : */
2004 andres 1544 GIC 1594531 : lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1545 :
1546 : /*
1547 : * scan the items until we find a match or exhaust our list
1548 : *
1549 : * Note: it's okay to use dlist_foreach here, even though we modify the
3825 tgl 1550 ECB : * dlist within the loop, because we don't continue the loop afterwards.
1551 : */
3827 alvherre 1552 CBC 11791816 : dlist_foreach(iter, &cache->cc_lists)
1553 : {
1554 11569924 : cl = dlist_container(CatCList, cache_elem, iter.cur);
7673 tgl 1555 EUB :
7673 tgl 1556 GIC 11569924 : if (cl->dead)
7673 tgl 1557 LBC 0 : continue; /* ignore dead entries */
7673 tgl 1558 ECB :
7673 tgl 1559 GIC 11569924 : if (cl->hash_value != lHashValue)
1560 10197285 : continue; /* quickly skip entry if wrong hash val */
1561 :
1562 : /*
7673 tgl 1563 ECB : * see if the cached list matches our key.
8171 tgl 1564 EUB : */
7673 tgl 1565 GIC 1372639 : if (cl->nkeys != nkeys)
7673 tgl 1566 LBC 0 : continue;
2004 andres 1567 EUB :
2004 andres 1568 GIC 1372639 : if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
7673 tgl 1569 UIC 0 : continue;
1570 :
1571 : /*
1572 : * We found a matching list. Move the list to the front of the
1573 : * cache's list-of-lists, to speed subsequent searches. (We do not
1574 : * move the members to the fronts of their hashbucket lists, however,
1575 : * since there's no point in that unless they are searched for
6385 bruce 1576 ECB : * individually.)
1577 : */
3827 alvherre 1578 GIC 1372639 : dlist_move_head(&cache->cc_lists, &cl->cache_elem);
7673 tgl 1579 ECB :
1580 : /* Bump the list's refcount and return it */
6448 tgl 1581 CBC 1372639 : ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
7673 tgl 1582 GIC 1372639 : cl->refcount++;
6840 1583 1372639 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1584 :
1585 : CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1586 : cache->cc_relname);
1587 :
1588 : #ifdef CATCACHE_STATS
1589 : cache->cc_lhits++;
7673 tgl 1590 ECB : #endif
1591 :
7673 tgl 1592 GIC 1372639 : return cl;
1593 : }
1594 :
1595 : /*
1596 : * List was not found in cache, so we have to build it by reading the
1597 : * relation. For each matching tuple found in the relation, use an
1598 : * existing cache entry if possible, else build a new one.
1599 : *
1600 : * We have to bump the member refcounts temporarily to ensure they won't
1601 : * get dropped from the cache while loading other members. We use a PG_TRY
1602 : * block to ensure we can undo those refcounts if we get an error before
6385 bruce 1603 ECB : * we finish constructing the CatCList.
1604 : */
6453 tgl 1605 CBC 221892 : ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1606 :
7673 1607 221892 : ctlist = NIL;
1608 :
6453 tgl 1609 GIC 221892 : PG_TRY();
1610 : {
1611 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1612 : Relation relation;
1613 : SysScanDesc scandesc;
1614 :
1615 : /*
1616 : * OK, we need to do a lookup in the relation; copy the scankey and
2004 andres 1617 ECB : * fill out any per-call fields.
1618 : */
2004 andres 1619 CBC 221892 : memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1620 221892 : cur_skey[0].sk_argument = v1;
1621 221892 : cur_skey[1].sk_argument = v2;
2004 andres 1622 GIC 221892 : cur_skey[2].sk_argument = v3;
2004 andres 1623 CBC 221892 : cur_skey[3].sk_argument = v4;
1624 :
1539 1625 221892 : relation = table_open(cache->cc_reloid, AccessShareLock);
1626 :
6453 tgl 1627 443784 : scandesc = systable_beginscan(relation,
1628 : cache->cc_indexoid,
4737 tgl 1629 GIC 221892 : IndexScanOK(cache, cur_skey),
1630 : NULL,
1631 : nkeys,
1632 : cur_skey);
7673 tgl 1633 ECB :
1634 : /* The list will be ordered iff we are doing an index scan */
6453 tgl 1635 CBC 221892 : ordered = (scandesc->irel != NULL);
1636 :
6453 tgl 1637 GIC 826011 : while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1638 : {
6453 tgl 1639 ECB : uint32 hashValue;
1640 : Index hashIndex;
3827 alvherre 1641 GIC 604119 : bool found = false;
1642 : dlist_head *bucket;
1643 :
1644 : /*
6453 tgl 1645 ECB : * See if there's an entry for this tuple already.
7673 1646 : */
6453 tgl 1647 CBC 604119 : ct = NULL;
2004 andres 1648 GIC 604119 : hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
6453 tgl 1649 CBC 604119 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
9345 bruce 1650 ECB :
3827 alvherre 1651 GIC 604119 : bucket = &cache->cc_bucket[hashIndex];
3827 alvherre 1652 CBC 911173 : dlist_foreach(iter, bucket)
1653 : {
1654 418921 : ct = dlist_container(CatCTup, cache_elem, iter.cur);
7673 tgl 1655 ECB :
6453 tgl 1656 GIC 418921 : if (ct->dead || ct->negative)
6385 bruce 1657 CBC 381 : continue; /* ignore dead and negative entries */
7673 tgl 1658 ECB :
6453 tgl 1659 GIC 418540 : if (ct->hash_value != hashValue)
6385 bruce 1660 CBC 294787 : continue; /* quickly skip entry if wrong hash val */
6453 tgl 1661 EUB :
6453 tgl 1662 GIC 123753 : if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
6385 bruce 1663 UIC 0 : continue; /* not same tuple */
1664 :
1665 : /*
1666 : * Found a match, but can't use it if it belongs to another
6453 tgl 1667 ECB : * list already
1668 : */
6453 tgl 1669 GIC 123753 : if (ct->c_list)
6453 tgl 1670 CBC 11886 : continue;
6453 tgl 1671 ECB :
3827 alvherre 1672 GIC 111867 : found = true;
6142 tgl 1673 111867 : break; /* A-OK */
6453 tgl 1674 ECB : }
1675 :
3827 alvherre 1676 GIC 604119 : if (!found)
6453 tgl 1677 ECB : {
1678 : /* We didn't find a usable entry, so make a new one */
2004 andres 1679 GIC 492252 : ct = CatalogCacheCreateEntry(cache, ntp, arguments,
1680 : hashValue, hashIndex,
1681 : false);
1682 : }
1683 :
6453 tgl 1684 ECB : /* Careful here: add entry to ctlist, then bump its refcount */
6448 1685 : /* This way leaves state correct if lappend runs out of memory */
6453 tgl 1686 GIC 604119 : ctlist = lappend(ctlist, ct);
1687 604119 : ct->refcount++;
9345 bruce 1688 ECB : }
1689 :
6453 tgl 1690 CBC 221892 : systable_endscan(scandesc);
1691 :
1539 andres 1692 GIC 221892 : table_close(relation, AccessShareLock);
6453 tgl 1693 ECB :
2004 andres 1694 : /* Now we can build the CatCList entry. */
6453 tgl 1695 GIC 221892 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
6453 tgl 1696 CBC 221892 : nmembers = list_length(ctlist);
1697 : cl = (CatCList *)
2118 tgl 1698 GIC 221892 : palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
2004 andres 1699 ECB :
1700 : /* Extract key values */
2004 andres 1701 CBC 221892 : CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
2004 andres 1702 GIC 221892 : arguments, cl->keys);
6453 tgl 1703 221892 : MemoryContextSwitchTo(oldcxt);
1704 :
1705 : /*
1706 : * We are now past the last thing that could trigger an elog before we
1707 : * have finished building the CatCList and remembering it in the
1708 : * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1709 : * we'd better do so before we start marking the members as belonging
1710 : * to the list.
6453 tgl 1711 EUB : */
1712 : }
6453 tgl 1713 UBC 0 : PG_CATCH();
1714 : {
1715 0 : foreach(ctlist_item, ctlist)
6453 tgl 1716 EUB : {
6453 tgl 1717 UBC 0 : ct = (CatCTup *) lfirst(ctlist_item);
1718 0 : Assert(ct->c_list == NULL);
1719 0 : Assert(ct->refcount > 0);
6453 tgl 1720 UIC 0 : ct->refcount--;
6448 tgl 1721 UBC 0 : if (
1722 : #ifndef CATCACHE_FORCE_RELEASE
1723 0 : ct->dead &&
6453 tgl 1724 EUB : #endif
6448 tgl 1725 UBC 0 : ct->refcount == 0 &&
6448 tgl 1726 UIC 0 : (ct->c_list == NULL || ct->c_list->refcount == 0))
6453 1727 0 : CatCacheRemoveCTup(cache, ct);
6453 tgl 1728 EUB : }
1729 :
6453 tgl 1730 LBC 0 : PG_RE_THROW();
1731 : }
6453 tgl 1732 CBC 221892 : PG_END_TRY();
7673 tgl 1733 ECB :
7673 tgl 1734 CBC 221892 : cl->cl_magic = CL_MAGIC;
1735 221892 : cl->my_cache = cache;
6840 1736 221892 : cl->refcount = 0; /* for the moment */
7673 1737 221892 : cl->dead = false;
1738 221892 : cl->ordered = ordered;
1739 221892 : cl->nkeys = nkeys;
7673 tgl 1740 GIC 221892 : cl->hash_value = lHashValue;
7673 tgl 1741 CBC 221892 : cl->n_members = nmembers;
6768 neilc 1742 ECB :
6453 tgl 1743 GIC 221892 : i = 0;
6453 tgl 1744 CBC 826011 : foreach(ctlist_item, ctlist)
7707 tgl 1745 ECB : {
6453 tgl 1746 CBC 604119 : cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
7673 tgl 1747 GIC 604119 : Assert(ct->c_list == NULL);
7673 tgl 1748 CBC 604119 : ct->c_list = cl;
6448 tgl 1749 ECB : /* release the temporary refcount on the member */
6448 tgl 1750 GIC 604119 : Assert(ct->refcount > 0);
6448 tgl 1751 CBC 604119 : ct->refcount--;
7673 tgl 1752 EUB : /* mark list dead if any members already dead */
7673 tgl 1753 GIC 604119 : if (ct->dead)
7673 tgl 1754 LBC 0 : cl->dead = true;
1755 : }
6453 tgl 1756 CBC 221892 : Assert(i == nmembers);
1757 :
3827 alvherre 1758 GIC 221892 : dlist_push_head(&cache->cc_lists, &cl->cache_elem);
7707 tgl 1759 ECB :
6840 1760 : /* Finally, bump the list's refcount and return it */
6840 tgl 1761 GIC 221892 : cl->refcount++;
1762 221892 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1763 :
1764 : CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1511 peter 1765 ECB : cache->cc_relname, nmembers);
1766 :
7673 tgl 1767 GIC 221892 : return cl;
1768 : }
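
A caller-side sketch (not part of catcache.c) of the partial-key list search described above, as normally reached through the syscache.h wrappers; count_procs_named() is an invented example.

    #include "utils/catcache.h"
    #include "utils/syscache.h"

    /* Hypothetical helper: count pg_proc entries with the given name. */
    static int
    count_procs_named(const char *funcname)
    {
        CatCList   *catlist;
        int         nmatches;

        /* search on the first key column (proname) only */
        catlist = SearchSysCacheList1(PROCNAMEARGSNSP,
                                      CStringGetDatum(funcname));

        /* member tuples are reachable as catlist->members[i]->tuple */
        nmatches = catlist->n_members;

        ReleaseSysCacheList(catlist);

        return nmatches;
    }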
1769 :
1770 : /*
1771 : * ReleaseCatCacheList
1772 : *
1773 : * Decrement the reference count of a catcache list.
7673 tgl 1774 ECB : */
1775 : void
7673 tgl 1776 GIC 1594531 : ReleaseCatCacheList(CatCList *list)
7673 tgl 1777 ECB : {
1778 : /* Safety checks to ensure we were handed a cache entry */
7673 tgl 1779 CBC 1594531 : Assert(list->cl_magic == CL_MAGIC);
1780 1594531 : Assert(list->refcount > 0);
7673 tgl 1781 GIC 1594531 : list->refcount--;
6840 tgl 1782 CBC 1594531 : ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
1783 :
6448 1784 1594531 : if (
1785 : #ifndef CATCACHE_FORCE_RELEASE
1786 1594531 : list->dead &&
7673 tgl 1787 ECB : #endif
6448 tgl 1788 CBC 3 : list->refcount == 0)
7673 tgl 1789 GIC 3 : CatCacheRemoveCList(list->my_cache, list);
1790 1594531 : }
1791 :
1792 :
1793 : /*
1794 : * CatalogCacheCreateEntry
1795 : * Create a new CatCTup entry, copying the given HeapTuple and other
1796 : * supplied data into it. The new entry initially has refcount 0.
7673 tgl 1797 ECB : */
1798 : static CatCTup *
2004 andres 1799 GIC 4898603 : CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
1800 : uint32 hashValue, Index hashIndex,
1801 : bool negative)
1802 : {
1803 : CatCTup *ct;
1804 : HeapTuple dtp;
1805 : MemoryContext oldcxt;
7673 tgl 1806 ECB :
1807 : /* negative entries have no tuple associated */
2004 andres 1808 GIC 4898603 : if (ntp)
1809 : {
2004 andres 1810 ECB : int i;
1811 :
2004 andres 1812 GIC 2895073 : Assert(!negative);
1813 :
1814 : /*
1815 : * If there are any out-of-line toasted fields in the tuple, expand
1816 : * them in-line. This saves cycles during later use of the catcache
1817 : * entry, and also protects us against the possibility of the toast
1818 : * tuples being freed before we attempt to fetch them, in case of
2004 andres 1819 ECB : * something using a slightly stale catcache entry.
1820 : */
2004 andres 1821 GIC 2895073 : if (HeapTupleHasExternal(ntp))
2004 andres 1822 CBC 3047 : dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
1823 : else
2004 andres 1824 GIC 2892026 : dtp = ntp;
2004 andres 1825 ECB :
1826 : /* Allocate memory for CatCTup and the cached tuple in one go */
2004 andres 1827 CBC 2895073 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
7673 tgl 1828 ECB :
2004 andres 1829 CBC 2895073 : ct = (CatCTup *) palloc(sizeof(CatCTup) +
1830 2895073 : MAXIMUM_ALIGNOF + dtp->t_len);
1831 2895073 : ct->tuple.t_len = dtp->t_len;
1832 2895073 : ct->tuple.t_self = dtp->t_self;
1833 2895073 : ct->tuple.t_tableOid = dtp->t_tableOid;
2004 andres 1834 GIC 2895073 : ct->tuple.t_data = (HeapTupleHeader)
2004 andres 1835 CBC 2895073 : MAXALIGN(((char *) ct) + sizeof(CatCTup));
2004 andres 1836 ECB : /* copy tuple contents */
2004 andres 1837 CBC 2895073 : memcpy((char *) ct->tuple.t_data,
1838 2895073 : (const char *) dtp->t_data,
2004 andres 1839 GIC 2895073 : dtp->t_len);
2004 andres 1840 CBC 2895073 : MemoryContextSwitchTo(oldcxt);
2004 andres 1841 ECB :
2004 andres 1842 GIC 2895073 : if (dtp != ntp)
1843 3047 : heap_freetuple(dtp);
2004 andres 1844 ECB :
1845 : /* extract keys - they'll point into the tuple if not by-value */
2004 andres 1846 GIC 8408463 : for (i = 0; i < cache->cc_nkeys; i++)
1847 : {
1848 : Datum atp;
2004 andres 1849 ECB : bool isnull;
1850 :
2004 andres 1851 GIC 5513390 : atp = heap_getattr(&ct->tuple,
1852 : cache->cc_keyno[i],
2004 andres 1853 ECB : cache->cc_tupdesc,
1854 : &isnull);
2004 andres 1855 GIC 5513390 : Assert(!isnull);
1856 5513390 : ct->keys[i] = atp;
1857 : }
1858 : }
2004 andres 1859 ECB : else
1860 : {
2004 andres 1861 CBC 2003530 : Assert(negative);
2004 andres 1862 GIC 2003530 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1863 2003530 : ct = (CatCTup *) palloc(sizeof(CatCTup));
1864 :
1865 : /*
1866 : * Store keys - they'll point into separately allocated memory if not
2004 andres 1867 ECB : * by-value.
1868 : */
2004 andres 1869 CBC 2003530 : CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
2004 andres 1870 GIC 2003530 : arguments, ct->keys);
1871 2003530 : MemoryContextSwitchTo(oldcxt);
1872 : }
1873 :
1874 : /*
1875 : * Finish initializing the CatCTup header, and add it to the cache's
6142 tgl 1876 ECB : * linked list and counts.
8179 1877 : */
8179 tgl 1878 CBC 4898603 : ct->ct_magic = CT_MAGIC;
7965 1879 4898603 : ct->my_cache = cache;
7673 1880 4898603 : ct->c_list = NULL;
6840 1881 4898603 : ct->refcount = 0; /* for the moment */
8179 1882 4898603 : ct->dead = false;
7673 tgl 1883 GIC 4898603 : ct->negative = negative;
7707 tgl 1884 CBC 4898603 : ct->hash_value = hashValue;
1885 :
3825 1886 4898603 : dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
7719 tgl 1887 ECB :
7673 tgl 1888 GIC 4898603 : cache->cc_ntup++;
1889 4898603 : CacheHdr->ch_ntup++;
1890 :
1891 : /*
1892 : * If the hash table has become too full, enlarge the buckets array. Quite
3260 bruce 1893 ECB : * arbitrarily, we enlarge when fill factor > 2.
3503 heikki.linnakangas 1894 : */
3503 heikki.linnakangas 1895 GIC 4898603 : if (cache->cc_ntup > cache->cc_nbuckets * 2)
3503 heikki.linnakangas 1896 CBC 10362 : RehashCatCache(cache);
1897 :
6448 tgl 1898 GIC 4898603 : return ct;
1899 : }
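
A short sketch (a hypothetical helper, mirroring the pointer arithmetic already used by ReleaseCatCache() earlier in this file) showing why the layout built here matters: the tuple body is palloc'd in the same chunk, MAXALIGN'ed just past the CatCTup header, so a HeapTuple pointer handed out by the cache can be mapped back to its entry with offsetof().

    /* Hypothetical inline, not in catcache.c: recover the cache entry that
     * owns a tuple returned by SearchCatCache(). */
    static inline CatCTup *
    catctup_from_tuple(HeapTuple tuple)
    {
        CatCTup    *ct = (CatCTup *) (((char *) tuple) -
                                      offsetof(CatCTup, tuple));

        Assert(ct->ct_magic == CT_MAGIC);   /* catch non-cache tuples */
        return ct;
    }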
1900 :
1901 : /*
1902 : * Helper routine that frees keys stored in the keys array.
8179 tgl 1903 ECB : */
1904 : static void
2004 andres 1905 GIC 995986 : CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
1906 : {
7673 tgl 1907 ECB : int i;
1908 :
2004 andres 1909 CBC 3446022 : for (i = 0; i < nkeys; i++)
1910 : {
2004 andres 1911 GIC 2450036 : int attnum = attnos[i];
1912 : Form_pg_attribute att;
2004 andres 1913 ECB :
1914 : /* system attributes are not supported in caches */
2004 andres 1915 CBC 2450036 : Assert(attnum > 0);
1916 :
1917 2450036 : att = TupleDescAttr(tupdesc, attnum - 1);
2004 andres 1918 ECB :
2004 andres 1919 GIC 2450036 : if (!att->attbyval)
2004 andres 1920 CBC 864008 : pfree(DatumGetPointer(keys[i]));
1921 : }
2004 andres 1922 GIC 995986 : }
1923 :
1924 : /*
1925 : * Helper routine that copies the keys in the srckeys array into the dstkeys
1926 : * one, guaranteeing that the datums are fully allocated in the current memory
1927 : * context.
2004 andres 1928 ECB : */
1929 : static void
2004 andres 1930 GIC 2225422 : CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
1931 : Datum *srckeys, Datum *dstkeys)
1932 : {
1933 : int i;
1934 :
1935 : /*
1936 : * XXX: memory and lookup performance could possibly be improved by
1937 : * storing all keys in one allocation.
2004 andres 1938 ECB : */
1939 :
7673 tgl 1940 CBC 7602510 : for (i = 0; i < nkeys; i++)
7673 tgl 1941 ECB : {
2004 andres 1942 CBC 5377088 : int attnum = attnos[i];
1601 andres 1943 GIC 5377088 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
1944 5377088 : Datum src = srckeys[i];
1945 : NameData srcname;
1946 :
1947 : /*
1948 : * Must be careful in case the caller passed a C string where a NAME
1949 : * is wanted: convert the given argument to a correctly padded NAME.
1950 : * Otherwise the memcpy() done by datumCopy() could fall off the end
1573 tgl 1951 ECB : * of memory.
1952 : */
1601 andres 1953 CBC 5377088 : if (att->atttypid == NAMEOID)
2004 andres 1954 ECB : {
1601 andres 1955 GIC 1397465 : namestrcpy(&srcname, DatumGetCString(src));
1956 1397465 : src = NameGetDatum(&srcname);
2004 andres 1957 ECB : }
1958 :
1601 andres 1959 CBC 5377088 : dstkeys[i] = datumCopy(src,
1601 andres 1960 GIC 5377088 : att->attbyval,
1601 andres 1961 CBC 5377088 : att->attlen);
1962 : }
9770 scrappy 1963 GIC 2225422 : }
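
A small sketch (not part of catcache.c) of the NAME special case handled above: a caller-supplied key for a name column is usually a plain C string, and datumCopy() for NAME copies a fixed NAMEDATALEN bytes, so the string is first padded into a NameData buffer. copy_name_key_example() is invented for illustration.

    #include "utils/builtins.h"
    #include "utils/datum.h"

    /* Hypothetical helper: copy a C-string search key as a NAME datum. */
    static Datum
    copy_name_key_example(const char *keystr)
    {
        NameData    padded;

        /* pad (or truncate) the string into a full NameData buffer */
        namestrcpy(&padded, keystr);

        /* NAME is by-reference with fixed length NAMEDATALEN */
        return datumCopy(NameGetDatum(&padded), false, NAMEDATALEN);
    }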
1964 :
1965 : /*
1966 : * PrepareToInvalidateCacheTuple()
1967 : *
1968 : * This is part of a rather subtle chain of events, so pay attention:
1969 : *
1970 : * When a tuple is inserted or deleted, it cannot be flushed from the
1971 : * catcaches immediately, for reasons explained at the top of cache/inval.c.
1972 : * Instead we have to add entry(s) for the tuple to a list of pending tuple
1973 : * invalidations that will be done at the end of the command or transaction.
1974 : *
1975 : * The lists of tuples that need to be flushed are kept by inval.c. This
1976 : * routine is a helper routine for inval.c. Given a tuple belonging to
1977 : * the specified relation, find all catcaches it could be in, compute the
1978 : * correct hash value for each such catcache, and call the specified
1979 : * function to record the cache id and hash value in inval.c's lists.
1980 : * SysCacheInvalidate will be called later, if appropriate,
1981 : * using the recorded information.
1982 : *
1983 : * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1984 : * For an update, we are called just once, with tuple being the old tuple
1985 : * version and newtuple the new version. We should make two list entries
1986 : * if the tuple's hash value changed, but only one if it didn't.
1987 : *
1988 : * Note that it is irrelevant whether the given tuple is actually loaded
1989 : * into the catcache at the moment. Even if it's not there now, it might
1990 : * be by the end of the command, or there might be a matching negative entry
1991 : * to flush --- or other backends' caches might have such entries --- so
1992 : * we have to make list entries to flush it later.
1993 : *
1994 : * Also note that it's not an error if there are no catcaches for the
1995 : * specified relation. inval.c doesn't know exactly which rels have
1996 : * catcaches --- it will call this routine for any tuple that's in a
1997 : * system relation.
9770 scrappy 1998 ECB : */
1999 : void
8129 tgl 2000 GIC 2735025 : PrepareToInvalidateCacheTuple(Relation relation,
2001 : HeapTuple tuple,
2002 : HeapTuple newtuple,
2003 : void (*function) (int, uint32, Oid))
2004 : {
2005 : slist_iter iter;
2006 : Oid reloid;
2007 :
2008 : CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2009 :
2010 : /*
8053 bruce 2011 ECB : * sanity checks
9345 2012 : */
9345 bruce 2013 CBC 2735025 : Assert(RelationIsValid(relation));
2014 2735025 : Assert(HeapTupleIsValid(tuple));
8945 bruce 2015 GIC 2735025 : Assert(PointerIsValid(function));
7965 tgl 2016 CBC 2735025 : Assert(CacheHdr != NULL);
2017 :
7707 tgl 2018 GIC 2735025 : reloid = RelationGetRelid(relation);
2019 :
2020 : /* ----------------
2021 : * for each cache
2022 : * if the cache contains tuples from the specified relation
2023 : * compute the tuple's hash value(s) in this cache,
2024 : * and call the passed function to register the information.
2025 : * ----------------
9345 bruce 2026 ECB : */
2027 :
3825 tgl 2028 CBC 229742100 : slist_foreach(iter, &CacheHdr->ch_caches)
2029 : {
3825 tgl 2030 GIC 227007075 : CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2031 : uint32 hashvalue;
4254 tgl 2032 ECB : Oid dbid;
2033 :
5513 tgl 2034 GIC 227007075 : if (ccp->cc_reloid != reloid)
2035 222205761 : continue;
5513 tgl 2036 ECB :
8185 2037 : /* Just in case cache hasn't finished initialization yet... */
8185 tgl 2038 GIC 4801314 : if (ccp->cc_tupdesc == NULL)
8185 tgl 2039 CBC 3488 : CatalogCacheInitializeCache(ccp);
8185 tgl 2040 ECB :
2004 andres 2041 GIC 4801314 : hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
4254 tgl 2042 CBC 4801314 : dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2043 :
2044 4801314 : (*function) (ccp->id, hashvalue, dbid);
2045 :
4254 tgl 2046 GIC 4801314 : if (newtuple)
2047 : {
4254 tgl 2048 ECB : uint32 newhashvalue;
2049 :
2004 andres 2050 CBC 443444 : newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
4254 tgl 2051 ECB :
4254 tgl 2052 GIC 443444 : if (newhashvalue != hashvalue)
2053 2652 : (*function) (ccp->id, newhashvalue, dbid);
4254 tgl 2054 ECB : }
2055 : }
9345 bruce 2056 GIC 2735025 : }
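
A sketch (not the real inval.c code) of the callback shape this routine expects: it is called once per affected catcache, plus a second time per cache when an update changes the hash value. The body below is a stand-in; the actual registration function lives in inval.c.

    /* Hypothetical stand-in with the expected signature. */
    static void
    example_register_catcache_inval(int cacheId, uint32 hashValue, Oid dbId)
    {
        elog(DEBUG5, "would queue catcache inval: cache %d, hash %u, db %u",
             cacheId, hashValue, dbId);
    }

    /* typical call site, e.g. while queuing invalidations for a catalog update:
     *   PrepareToInvalidateCacheTuple(relation, oldtup, newtup,
     *                                 example_register_catcache_inval);
     */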
2057 :
2058 :
2059 : /*
2060 : * Subroutines for warning about reference leaks. These are exported so
2061 : * that resowner.c can call them.
6589 tgl 2062 EUB : */
2063 : void
6589 tgl 2064 UBC 0 : PrintCatCacheLeakWarning(HeapTuple tuple)
2065 : {
6589 tgl 2066 UIC 0 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
2067 : offsetof(CatCTup, tuple));
6589 tgl 2068 EUB :
2069 : /* Safety check to ensure we were handed a cache entry */
6589 tgl 2070 UBC 0 : Assert(ct->ct_magic == CT_MAGIC);
2071 :
6589 tgl 2072 UIC 0 : elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
2073 : ct->my_cache->cc_relname, ct->my_cache->id,
2074 : ItemPointerGetBlockNumber(&(tuple->t_self)),
6589 tgl 2075 EUB : ItemPointerGetOffsetNumber(&(tuple->t_self)),
2076 : ct->refcount);
6589 tgl 2077 UIC 0 : }
6589 tgl 2078 EUB :
2079 : void
6589 tgl 2080 UBC 0 : PrintCatCacheListLeakWarning(CatCList *list)
2081 : {
6589 tgl 2082 UIC 0 : elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
6589 tgl 2083 EUB : list->my_cache->cc_relname, list->my_cache->id,
2084 : list, list->refcount);
6589 tgl 2085 UIC 0 : }