Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * catcache.c
4 : * System catalog cache for tuples matching a key.
5 : *
6 : * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/utils/cache/catcache.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include "access/genam.h"
18 : #include "access/hash.h"
19 : #include "access/heapam.h"
20 : #include "access/relscan.h"
21 : #include "access/sysattr.h"
22 : #include "access/tuptoaster.h"
23 : #include "access/valid.h"
24 : #include "access/xact.h"
25 : #include "catalog/pg_operator.h"
26 : #include "catalog/pg_type.h"
27 : #include "miscadmin.h"
28 : #ifdef CATCACHE_STATS
29 : #include "storage/ipc.h" /* for on_proc_exit */
30 : #endif
31 : #include "storage/lmgr.h"
32 : #include "utils/builtins.h"
33 : #include "utils/fmgroids.h"
34 : #include "utils/inval.h"
35 : #include "utils/memutils.h"
36 : #include "utils/rel.h"
37 : #include "utils/resowner_private.h"
38 : #include "utils/syscache.h"
39 : #include "utils/tqual.h"
40 :
41 :
42 : /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
43 :
44 : /*
45 : * Given a hash value and the size of the hash table, find the bucket
46 : * in which the hash value belongs. Since the hash table must contain
47 : * a power-of-2 number of elements, this is a simple bitmask.
48 : */
49 : #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
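
/*
 * Worked example (values illustrative, not from this file): with a
 * 256-bucket table the mask is 0xFF, so only the low-order bits of the
 * hash select a bucket: HASH_INDEX(0xDEADBEEF, 256) == 0xEF == 239.
 */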
50 :
51 :
52 : /*
53 : * variables, macros and other stuff
54 : */
55 :
56 : #ifdef CACHEDEBUG
57 : #define CACHE1_elog(a,b) elog(a,b)
58 : #define CACHE2_elog(a,b,c) elog(a,b,c)
59 : #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
60 : #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
61 : #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
62 : #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
63 : #else
64 : #define CACHE1_elog(a,b)
65 : #define CACHE2_elog(a,b,c)
66 : #define CACHE3_elog(a,b,c,d)
67 : #define CACHE4_elog(a,b,c,d,e)
68 : #define CACHE5_elog(a,b,c,d,e,f)
69 : #define CACHE6_elog(a,b,c,d,e,f,g)
70 : #endif
71 :
72 : /* Cache management header --- pointer is NULL until created */
73 : static CatCacheHeader *CacheHdr = NULL;
74 :
75 :
76 : static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
77 : ScanKey cur_skey);
78 : static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
79 : HeapTuple tuple);
80 :
81 : #ifdef CATCACHE_STATS
82 : static void CatCachePrintStats(int code, Datum arg);
83 : #endif
84 : static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
85 : static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
86 : static void CatalogCacheInitializeCache(CatCache *cache);
87 : static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
88 : uint32 hashValue, Index hashIndex,
89 : bool negative);
90 : static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
91 :
92 :
93 : /*
94 : * internal support functions
95 : */
96 :
97 : /*
98 : * Look up the hash and equality functions for system types that are used
99 : * as cache key fields.
100 : *
101 : * XXX this should be replaced by catalog lookups,
102 : * but that seems to pose considerable risk of circularity...
103 : */
104 : static void
105 13199 : GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
106 : {
107 13199 : switch (keytype)
108 : {
109 : case BOOLOID:
110 183 : *hashfunc = hashchar;
111 :
112 183 : *eqfunc = F_BOOLEQ;
113 183 : break;
114 : case CHAROID:
115 459 : *hashfunc = hashchar;
116 :
117 459 : *eqfunc = F_CHAREQ;
118 459 : break;
119 : case NAMEOID:
120 2292 : *hashfunc = hashname;
121 :
122 2292 : *eqfunc = F_NAMEEQ;
123 2292 : break;
124 : case INT2OID:
125 735 : *hashfunc = hashint2;
126 :
127 735 : *eqfunc = F_INT2EQ;
128 735 : break;
129 : case INT4OID:
130 104 : *hashfunc = hashint4;
131 :
132 104 : *eqfunc = F_INT4EQ;
133 104 : break;
134 : case TEXTOID:
135 17 : *hashfunc = hashtext;
136 :
137 17 : *eqfunc = F_TEXTEQ;
138 17 : break;
139 : case OIDOID:
140 : case REGPROCOID:
141 : case REGPROCEDUREOID:
142 : case REGOPEROID:
143 : case REGOPERATOROID:
144 : case REGCLASSOID:
145 : case REGTYPEOID:
146 : case REGCONFIGOID:
147 : case REGDICTIONARYOID:
148 : case REGROLEOID:
149 : case REGNAMESPACEOID:
150 9221 : *hashfunc = hashoid;
151 :
152 9221 : *eqfunc = F_OIDEQ;
153 9221 : break;
154 : case OIDVECTOROID:
155 188 : *hashfunc = hashoidvector;
156 :
157 188 : *eqfunc = F_OIDVECTOREQ;
158 188 : break;
159 : default:
160 0 : elog(FATAL, "type %u not supported as catcache key", keytype);
161 : *hashfunc = NULL; /* keep compiler quiet */
162 :
163 : *eqfunc = InvalidOid;
164 : break;
165 : }
166 13199 : }
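
/*
 * A minimal caller sketch; CatalogCacheInitializeCache below is the real
 * (and only) caller, so this is purely illustrative.
 */
#ifdef NOT_USED
{
	PGFunction	hashfunc;
	RegProcedure eqfunc;

	GetCCHashEqFuncs(NAMEOID, &hashfunc, &eqfunc);
	/* now hashfunc == hashname and eqfunc == F_NAMEEQ */
}
#endif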
167 :
168 : /*
169 : * CatalogCacheComputeHashValue
170 : *
171 : * Compute the hash value associated with a given set of lookup keys
172 : */
173 : static uint32
174 2435746 : CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
175 : {
176 2435746 : uint32 hashValue = 0;
177 : uint32 oneHash;
178 :
179 : CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
180 : cache->cc_relname,
181 : nkeys,
182 : cache);
183 :
184 2435746 : switch (nkeys)
185 : {
186 : case 4:
187 115329 : oneHash =
188 115329 : DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
189 : cur_skey[3].sk_argument));
190 115329 : hashValue ^= oneHash << 24;
191 115329 : hashValue ^= oneHash >> 8;
192 : /* FALLTHROUGH */
193 : case 3:
194 305204 : oneHash =
195 305204 : DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
196 : cur_skey[2].sk_argument));
197 305204 : hashValue ^= oneHash << 16;
198 305204 : hashValue ^= oneHash >> 16;
199 : /* FALLTHROUGH */
200 : case 2:
201 677603 : oneHash =
202 677603 : DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
203 : cur_skey[1].sk_argument));
204 677603 : hashValue ^= oneHash << 8;
205 677603 : hashValue ^= oneHash >> 24;
206 : /* FALLTHROUGH */
207 : case 1:
208 2435746 : oneHash =
209 2435746 : DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
210 : cur_skey[0].sk_argument));
211 2435746 : hashValue ^= oneHash;
212 2435746 : break;
213 : default:
214 0 : elog(FATAL, "wrong number of hash keys: %d", nkeys);
215 : break;
216 : }
217 :
218 2435746 : return hashValue;
219 : }
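
/*
 * A sketch of the combining scheme above for a two-key cache, with h0/h1
 * standing in for the DirectFunctionCall1 results: each additional key's
 * hash is shifted into a different byte position before being XORed in,
 * so equal key values in different columns don't simply cancel out.
 */
#ifdef NOT_USED
static uint32
combine_two_key_hashes(uint32 h0, uint32 h1)
{
	uint32		hashValue = 0;

	hashValue ^= h1 << 8;		/* second key, as in "case 2" above */
	hashValue ^= h1 >> 24;
	hashValue ^= h0;			/* first key, as in "case 1" above */
	return hashValue;
}
#endif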
220 :
221 : /*
222 : * CatalogCacheComputeTupleHashValue
223 : *
224 : * Compute the hash value associated with a given tuple to be cached
225 : */
226 : static uint32
227 190162 : CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
228 : {
229 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
230 190162 : bool isNull = false;
231 :
232 : /* Copy pre-initialized overhead data for scankey */
233 190162 : memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
234 :
235 : /* Now extract key fields from tuple, insert into scankey */
236 190162 : switch (cache->cc_nkeys)
237 : {
238 : case 4:
239 18489 : cur_skey[3].sk_argument =
240 18489 : (cache->cc_key[3] == ObjectIdAttributeNumber)
241 289 : ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
242 18778 : : fastgetattr(tuple,
243 : cache->cc_key[3],
244 : cache->cc_tupdesc,
245 : &isNull);
246 18489 : Assert(!isNull);
247 : /* FALLTHROUGH */
248 : case 3:
249 35279 : cur_skey[2].sk_argument =
250 35279 : (cache->cc_key[2] == ObjectIdAttributeNumber)
251 0 : ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
252 35279 : : fastgetattr(tuple,
253 : cache->cc_key[2],
254 : cache->cc_tupdesc,
255 : &isNull);
256 35279 : Assert(!isNull);
257 : /* FALLTHROUGH */
258 : case 2:
259 155240 : cur_skey[1].sk_argument =
260 155240 : (cache->cc_key[1] == ObjectIdAttributeNumber)
261 0 : ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
262 155240 : : fastgetattr(tuple,
263 : cache->cc_key[1],
264 : cache->cc_tupdesc,
265 : &isNull);
266 155240 : Assert(!isNull);
267 : /* FALLTHROUGH */
268 : case 1:
269 190162 : cur_skey[0].sk_argument =
270 190162 : (cache->cc_key[0] == ObjectIdAttributeNumber)
271 31240 : ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
272 221402 : : fastgetattr(tuple,
273 : cache->cc_key[0],
274 : cache->cc_tupdesc,
275 : &isNull);
276 190162 : Assert(!isNull);
277 190162 : break;
278 : default:
279 0 : elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
280 : break;
281 : }
282 :
283 190162 : return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
284 : }
285 :
286 :
287 : #ifdef CATCACHE_STATS
288 :
289 : static void
290 : CatCachePrintStats(int code, Datum arg)
291 : {
292 : slist_iter iter;
293 : long cc_searches = 0;
294 : long cc_hits = 0;
295 : long cc_neg_hits = 0;
296 : long cc_newloads = 0;
297 : long cc_invals = 0;
298 : long cc_lsearches = 0;
299 : long cc_lhits = 0;
300 :
301 : slist_foreach(iter, &CacheHdr->ch_caches)
302 : {
303 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
304 :
305 : if (cache->cc_ntup == 0 && cache->cc_searches == 0)
306 : continue; /* don't print unused caches */
307 : elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
308 : cache->cc_relname,
309 : cache->cc_indexoid,
310 : cache->cc_ntup,
311 : cache->cc_searches,
312 : cache->cc_hits,
313 : cache->cc_neg_hits,
314 : cache->cc_hits + cache->cc_neg_hits,
315 : cache->cc_newloads,
316 : cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
317 : cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
318 : cache->cc_invals,
319 : cache->cc_lsearches,
320 : cache->cc_lhits);
321 : cc_searches += cache->cc_searches;
322 : cc_hits += cache->cc_hits;
323 : cc_neg_hits += cache->cc_neg_hits;
324 : cc_newloads += cache->cc_newloads;
325 : cc_invals += cache->cc_invals;
326 : cc_lsearches += cache->cc_lsearches;
327 : cc_lhits += cache->cc_lhits;
328 : }
329 : elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
330 : CacheHdr->ch_ntup,
331 : cc_searches,
332 : cc_hits,
333 : cc_neg_hits,
334 : cc_hits + cc_neg_hits,
335 : cc_newloads,
336 : cc_searches - cc_hits - cc_neg_hits - cc_newloads,
337 : cc_searches - cc_hits - cc_neg_hits,
338 : cc_invals,
339 : cc_lsearches,
340 : cc_lhits);
341 : }
342 : #endif /* CATCACHE_STATS */
343 :
344 :
345 : /*
346 : * CatCacheRemoveCTup
347 : *
348 : * Unlink and delete the given cache entry
349 : *
350 : * NB: if it is a member of a CatCList, the CatCList is deleted too.
351 : * Both the cache entry and the list had better have zero refcount.
352 : */
353 : static void
354 46424 : CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
355 : {
356 46424 : Assert(ct->refcount == 0);
357 46424 : Assert(ct->my_cache == cache);
358 :
359 46424 : if (ct->c_list)
360 : {
361 : /*
362 : * The cleanest way to handle this is to call CatCacheRemoveCList,
363 : * which will recurse back to me, and the recursive call will do the
364 : * work. Set the "dead" flag to make sure it does recurse.
365 : */
366 0 : ct->dead = true;
367 0 : CatCacheRemoveCList(cache, ct->c_list);
368 46424 : return; /* nothing left to do */
369 : }
370 :
371 : /* delink from linked list */
372 46424 : dlist_delete(&ct->cache_elem);
373 :
374 : /* free associated tuple data */
375 46424 : if (ct->tuple.t_data != NULL)
376 46424 : pfree(ct->tuple.t_data);
377 46424 : pfree(ct);
378 :
379 46424 : --cache->cc_ntup;
380 46424 : --CacheHdr->ch_ntup;
381 : }
382 :
383 : /*
384 : * CatCacheRemoveCList
385 : *
386 : * Unlink and delete the given cache list entry
387 : *
388 : * NB: any dead member entries that become unreferenced are deleted too.
389 : */
390 : static void
391 5091 : CatCacheRemoveCList(CatCache *cache, CatCList *cl)
392 : {
393 : int i;
394 :
395 5091 : Assert(cl->refcount == 0);
396 5091 : Assert(cl->my_cache == cache);
397 :
398 : /* delink from member tuples */
399 23813 : for (i = cl->n_members; --i >= 0;)
400 : {
401 13631 : CatCTup *ct = cl->members[i];
402 :
403 13631 : Assert(ct->c_list == cl);
404 13631 : ct->c_list = NULL;
405 : /* if the member is dead and now has no references, remove it */
406 13631 : if (
407 : #ifndef CATCACHE_FORCE_RELEASE
408 13655 : ct->dead &&
409 : #endif
410 24 : ct->refcount == 0)
411 24 : CatCacheRemoveCTup(cache, ct);
412 : }
413 :
414 : /* delink from linked list */
415 5091 : dlist_delete(&cl->cache_elem);
416 :
417 : /* free associated tuple data */
418 5091 : if (cl->tuple.t_data != NULL)
419 5091 : pfree(cl->tuple.t_data);
420 5091 : pfree(cl);
421 5091 : }
422 :
423 :
424 : /*
425 : * CatCacheInvalidate
426 : *
427 : * Invalidate entries in the specified cache, given a hash value.
428 : *
429 : * We delete cache entries that match the hash value, whether positive
430 : * or negative. We don't care whether the invalidation is the result
431 : * of a tuple insertion or a deletion.
432 : *
433 : * We used to try to match positive cache entries by TID, but that is
434 : * unsafe after a VACUUM FULL on a system catalog: an inval event could
435 : * be queued before VACUUM FULL, and then processed afterwards, when the
436 : * target tuple that has to be invalidated has a different TID than it
437 : * did when the event was created. So now we just compare hash values and
438 : * accept the small risk of unnecessary invalidations due to false matches.
439 : *
440 : * This routine is only quasi-public: it should only be used by inval.c.
441 : */
442 : void
443 679694 : CatCacheInvalidate(CatCache *cache, uint32 hashValue)
444 : {
445 : Index hashIndex;
446 : dlist_mutable_iter iter;
447 :
448 : CACHE1_elog(DEBUG2, "CatCacheInvalidate: called");
449 :
450 : /*
451 : * We don't bother to check whether the cache has finished initialization
452 : * yet; if not, there will be no entries in it so no problem.
453 : */
454 :
455 : /*
456 : * Invalidate *all* CatCLists in this cache; it's too hard to tell which
457 : * searches might still be correct, so just zap 'em all.
458 : */
459 684613 : dlist_foreach_modify(iter, &cache->cc_lists)
460 : {
461 4919 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
462 :
463 4919 : if (cl->refcount > 0)
464 24 : cl->dead = true;
465 : else
466 4895 : CatCacheRemoveCList(cache, cl);
467 : }
468 :
469 : /*
470 : * inspect the proper hash bucket for tuple matches
471 : */
472 679694 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
473 887837 : dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
474 : {
475 208143 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
476 :
477 208143 : if (hashValue == ct->hash_value)
478 : {
479 83228 : if (ct->refcount > 0 ||
480 41616 : (ct->c_list && ct->c_list->refcount > 0))
481 : {
482 68 : ct->dead = true;
483 : /* list, if any, was marked dead above */
484 68 : Assert(ct->c_list == NULL || ct->c_list->dead);
485 : }
486 : else
487 41568 : CatCacheRemoveCTup(cache, ct);
488 : CACHE1_elog(DEBUG2, "CatCacheInvalidate: invalidated");
489 : #ifdef CATCACHE_STATS
490 : cache->cc_invals++;
491 : #endif
492 : /* could be multiple matches, so keep looking! */
493 : }
494 : }
495 679694 : }
496 :
497 : /* ----------------------------------------------------------------
498 : * public functions
499 : * ----------------------------------------------------------------
500 : */
501 :
502 :
503 : /*
504 : * Standard routine for creating cache context if it doesn't exist yet
505 : *
506 : * There are a lot of places (probably far more than necessary) that check
507 : * whether CacheMemoryContext exists yet and want to create it if not.
508 : * We centralize knowledge of exactly how to create it here.
509 : */
510 : void
511 338 : CreateCacheMemoryContext(void)
512 : {
513 : /*
514 : * Purely for paranoia, check that context doesn't exist; caller probably
515 : * did so already.
516 : */
517 338 : if (!CacheMemoryContext)
518 338 : CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
519 : "CacheMemoryContext",
520 : ALLOCSET_DEFAULT_SIZES);
521 338 : }
522 :
523 :
524 : /*
525 : * ResetCatalogCache
526 : *
527 : * Reset one catalog cache to empty.
528 : *
529 : * This is not very efficient if the target cache is nearly empty.
530 : * However, it shouldn't need to be efficient; we don't invoke it often.
531 : */
532 : static void
533 9657 : ResetCatalogCache(CatCache *cache)
534 : {
535 : dlist_mutable_iter iter;
536 : int i;
537 :
538 : /* Remove each list in this cache, or at least mark it dead */
539 9852 : dlist_foreach_modify(iter, &cache->cc_lists)
540 : {
541 195 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
542 :
543 195 : if (cl->refcount > 0)
544 0 : cl->dead = true;
545 : else
546 195 : CatCacheRemoveCList(cache, cl);
547 : }
548 :
549 : /* Remove each tuple in this cache, or at least mark it dead */
550 298523 : for (i = 0; i < cache->cc_nbuckets; i++)
551 : {
552 288866 : dlist_head *bucket = &cache->cc_bucket[i];
553 :
554 293654 : dlist_foreach_modify(iter, bucket)
555 : {
556 4788 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
557 :
558 9576 : if (ct->refcount > 0 ||
559 4788 : (ct->c_list && ct->c_list->refcount > 0))
560 : {
561 0 : ct->dead = true;
562 : /* list, if any, was marked dead above */
563 0 : Assert(ct->c_list == NULL || ct->c_list->dead);
564 : }
565 : else
566 4788 : CatCacheRemoveCTup(cache, ct);
567 : #ifdef CATCACHE_STATS
568 : cache->cc_invals++;
569 : #endif
570 : }
571 : }
572 9657 : }
573 :
574 : /*
575 : * ResetCatalogCaches
576 : *
577 : * Reset all caches when a shared cache inval event forces it
578 : */
579 : void
580 125 : ResetCatalogCaches(void)
581 : {
582 : slist_iter iter;
583 :
584 : CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
585 :
586 9750 : slist_foreach(iter, &CacheHdr->ch_caches)
587 : {
588 9625 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
589 :
590 9625 : ResetCatalogCache(cache);
591 : }
592 :
593 : CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
594 125 : }
595 :
596 : /*
597 : * CatalogCacheFlushCatalog
598 : *
599 : * Flush all catcache entries that came from the specified system catalog.
600 : * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
601 : * tuples very likely now have different TIDs than before. (At one point
602 : * we also tried to force re-execution of CatalogCacheInitializeCache for
603 : * the cache(s) on that catalog. This is a bad idea since it leads to all
604 : * kinds of trouble if a cache flush occurs while loading cache entries.
605 : * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
606 : * rather than relying on the relcache to keep a tupdesc for us. Of course
607 : * this assumes the tupdesc of a cachable system table will not change...)
608 : */
609 : void
610 20 : CatalogCacheFlushCatalog(Oid catId)
611 : {
612 : slist_iter iter;
613 :
614 : CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
615 :
616 1560 : slist_foreach(iter, &CacheHdr->ch_caches)
617 : {
618 1540 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
619 :
620 : /* Does this cache store tuples of the target catalog? */
621 1540 : if (cache->cc_reloid == catId)
622 : {
623 : /* Yes, so flush all its contents */
624 32 : ResetCatalogCache(cache);
625 :
626 : /* Tell inval.c to call syscache callbacks for this cache */
627 32 : CallSyscacheCallbacks(cache->id, 0);
628 : }
629 : }
630 :
631 : CACHE1_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
632 20 : }
633 :
634 : /*
635 : * InitCatCache
636 : *
637 : * This allocates and initializes a cache for a system catalog relation.
638 : * Actually, the cache is only partially initialized to avoid opening the
639 : * relation. The relation will be opened and the rest of the cache
640 : * structure initialized on the first access.
641 : */
642 : #ifdef CACHEDEBUG
643 : #define InitCatCache_DEBUG2 \
644 : do { \
645 : elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
646 : cp->cc_reloid, cp->cc_indexoid, cp->id, \
647 : cp->cc_nkeys, cp->cc_nbuckets); \
648 : } while(0)
649 : #else
650 : #define InitCatCache_DEBUG2
651 : #endif
652 :
653 : CatCache *
654 26026 : InitCatCache(int id,
655 : Oid reloid,
656 : Oid indexoid,
657 : int nkeys,
658 : const int *key,
659 : int nbuckets)
660 : {
661 : CatCache *cp;
662 : MemoryContext oldcxt;
663 : int i;
664 :
665 : /*
666 : * nbuckets is the initial number of hash buckets to use in this catcache.
667 : * It will be enlarged later if it becomes too full.
668 : *
669 : * nbuckets must be a power of two. We check this via Assert rather than
670 : * a full runtime check because the values will be coming from constant
671 : * tables.
672 : *
673 : * If you're confused by the power-of-two check, see comments in
674 : * bitmapset.c for an explanation.
675 : */
676 26026 : Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
677 :
678 : /*
679 : * first switch to the cache context so our allocations do not vanish at
680 : * the end of a transaction
681 : */
682 26026 : if (!CacheMemoryContext)
683 0 : CreateCacheMemoryContext();
684 :
685 26026 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
686 :
687 : /*
688 : * if first time through, initialize the cache group header
689 : */
690 26026 : if (CacheHdr == NULL)
691 : {
692 338 : CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
693 338 : slist_init(&CacheHdr->ch_caches);
694 338 : CacheHdr->ch_ntup = 0;
695 : #ifdef CATCACHE_STATS
696 : /* set up to dump stats at backend exit */
697 : on_proc_exit(CatCachePrintStats, 0);
698 : #endif
699 : }
700 :
701 : /*
702 : * allocate a new cache structure
703 : *
704 : * Note: we rely on zeroing to initialize all the dlist headers correctly
705 : */
706 26026 : cp = (CatCache *) palloc0(sizeof(CatCache));
707 26026 : cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
708 :
709 : /*
710 : * initialize the cache's relation information for the relation
711 : * corresponding to this cache, and initialize some of the new cache's
712 : * other internal fields. But don't open the relation yet.
713 : */
714 26026 : cp->id = id;
715 26026 : cp->cc_relname = "(not known yet)";
716 26026 : cp->cc_reloid = reloid;
717 26026 : cp->cc_indexoid = indexoid;
718 26026 : cp->cc_relisshared = false; /* temporary */
719 26026 : cp->cc_tupdesc = (TupleDesc) NULL;
720 26026 : cp->cc_ntup = 0;
721 26026 : cp->cc_nbuckets = nbuckets;
722 26026 : cp->cc_nkeys = nkeys;
723 68276 : for (i = 0; i < nkeys; ++i)
724 42250 : cp->cc_key[i] = key[i];
725 :
726 : /*
727 : * new cache is initialized as far as we can go for now. print some
728 : * debugging information, if appropriate.
729 : */
730 : InitCatCache_DEBUG2;
731 :
732 : /*
733 : * add completed cache to top of group header's list
734 : */
735 26026 : slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
736 :
737 : /*
738 : * back to the old context before we return...
739 : */
740 26026 : MemoryContextSwitchTo(oldcxt);
741 :
742 26026 : return cp;
743 : }
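
/*
 * A sketch of defining a one-key cache keyed on OID. In reality this is
 * driven from syscache.c's table of cache definitions; the cache id and
 * the catalog/index OID constants below are placeholders.
 */
#ifdef NOT_USED
static CatCache *
init_example_cache(void)
{
	static const int key[1] = {ObjectIdAttributeNumber};

	/* nbuckets (here 64) must be a power of two, per the Assert above */
	return InitCatCache(MY_CACHE_ID, MyCatalogRelationId,
						MyCatalogOidIndexId, 1, key, 64);
}
#endif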
744 :
745 : /*
746 : * Enlarge a catcache, doubling the number of buckets.
747 : */
748 : static void
749 111 : RehashCatCache(CatCache *cp)
750 : {
751 : dlist_head *newbucket;
752 : int newnbuckets;
753 : int i;
754 :
755 111 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
756 : cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
757 :
758 : /* Allocate a new, larger, hash table. */
759 111 : newnbuckets = cp->cc_nbuckets * 2;
760 111 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
761 :
762 : /* Move all entries from old hash table to new. */
763 6993 : for (i = 0; i < cp->cc_nbuckets; i++)
764 : {
765 : dlist_mutable_iter iter;
766 :
767 20757 : dlist_foreach_modify(iter, &cp->cc_bucket[i])
768 : {
769 13875 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
770 13875 : int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
771 :
772 13875 : dlist_delete(iter.cur);
773 13875 : dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
774 : }
775 : }
776 :
777 : /* Switch to the new array. */
778 111 : pfree(cp->cc_bucket);
779 111 : cp->cc_nbuckets = newnbuckets;
780 111 : cp->cc_bucket = newbucket;
781 111 : }
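
/*
 * Worked example: doubling from 4 to 8 buckets widens the HASH_INDEX mask
 * from 0x3 to 0x7, so an entry with hash_value 0x2F moves from bucket 3
 * to bucket 7. Each old bucket's entries redistribute across exactly two
 * new buckets, selected by the newly significant mask bit.
 */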
782 :
783 : /*
784 : * CatalogCacheInitializeCache
785 : *
786 : * This function does final initialization of a catcache: obtain the tuple
787 : * descriptor and set up the hash and equality function links. We assume
788 : * that the relcache entry can be opened at this point!
789 : */
790 : #ifdef CACHEDEBUG
791 : #define CatalogCacheInitializeCache_DEBUG1 \
792 : elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
793 : cache->cc_reloid)
794 :
795 : #define CatalogCacheInitializeCache_DEBUG2 \
796 : do { \
797 : if (cache->cc_key[i] > 0) { \
798 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
799 : i+1, cache->cc_nkeys, cache->cc_key[i], \
800 : TupleDescAttr(tupdesc, cache->cc_key[i] - 1)->atttypid); \
801 : } else { \
802 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
803 : i+1, cache->cc_nkeys, cache->cc_key[i]); \
804 : } \
805 : } while(0)
806 : #else
807 : #define CatalogCacheInitializeCache_DEBUG1
808 : #define CatalogCacheInitializeCache_DEBUG2
809 : #endif
810 :
811 : static void
812 8078 : CatalogCacheInitializeCache(CatCache *cache)
813 : {
814 : Relation relation;
815 : MemoryContext oldcxt;
816 : TupleDesc tupdesc;
817 : int i;
818 :
819 : CatalogCacheInitializeCache_DEBUG1;
820 :
821 8078 : relation = heap_open(cache->cc_reloid, AccessShareLock);
822 :
823 : /*
824 : * switch to the cache context so our allocations do not vanish at the end
825 : * of a transaction
826 : */
827 8078 : Assert(CacheMemoryContext != NULL);
828 :
829 8078 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
830 :
831 : /*
832 : * copy the relcache's tuple descriptor to permanent cache storage
833 : */
834 8078 : tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
835 :
836 : /*
837 : * save the relation's name and relisshared flag, too (cc_relname is used
838 : * only for debugging purposes)
839 : */
840 8078 : cache->cc_relname = pstrdup(RelationGetRelationName(relation));
841 8078 : cache->cc_relisshared = RelationGetForm(relation)->relisshared;
842 :
843 : /*
844 : * return to the caller's memory context and close the rel
845 : */
846 8078 : MemoryContextSwitchTo(oldcxt);
847 :
848 8078 : heap_close(relation, AccessShareLock);
849 :
850 : CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
851 : cache->cc_relname, cache->cc_nkeys);
852 :
853 : /*
854 : * initialize cache's key information
855 : */
856 21277 : for (i = 0; i < cache->cc_nkeys; ++i)
857 : {
858 : Oid keytype;
859 : RegProcedure eqfunc;
860 :
861 : CatalogCacheInitializeCache_DEBUG2;
862 :
863 13199 : if (cache->cc_key[i] > 0)
864 : {
865 9781 : Form_pg_attribute attr = TupleDescAttr(tupdesc,
866 : cache->cc_key[i] - 1);
867 :
868 9781 : keytype = attr->atttypid;
869 : /* cache key columns should always be NOT NULL */
870 9781 : Assert(attr->attnotnull);
871 : }
872 : else
873 : {
874 3418 : if (cache->cc_key[i] != ObjectIdAttributeNumber)
875 0 : elog(FATAL, "only sys attr supported in caches is OID");
876 3418 : keytype = OIDOID;
877 : }
878 :
879 13199 : GetCCHashEqFuncs(keytype,
880 : &cache->cc_hashfunc[i],
881 : &eqfunc);
882 :
883 13199 : cache->cc_isname[i] = (keytype == NAMEOID);
884 :
885 : /*
886 : * Do equality-function lookup (we assume this won't need a catalog
887 : * lookup for any supported type)
888 : */
889 13199 : fmgr_info_cxt(eqfunc,
890 : &cache->cc_skey[i].sk_func,
891 : CacheMemoryContext);
892 :
893 : /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
894 13199 : cache->cc_skey[i].sk_attno = cache->cc_key[i];
895 :
896 : /* Fill in sk_strategy as well --- always standard equality */
897 13199 : cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
898 13199 : cache->cc_skey[i].sk_subtype = InvalidOid;
899 : /* Currently, there are no catcaches on collation-aware data types */
900 13199 : cache->cc_skey[i].sk_collation = InvalidOid;
901 :
902 : CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
903 : cache->cc_relname,
904 : i,
905 : cache);
906 : }
907 :
908 : /*
909 : * mark this cache fully initialized
910 : */
911 8078 : cache->cc_tupdesc = tupdesc;
912 8078 : }
913 :
914 : /*
915 : * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
916 : *
917 : * One reason to call this routine is to ensure that the relcache has
918 : * created entries for all the catalogs and indexes referenced by catcaches.
919 : * Therefore, provide an option to open the index as well as fixing the
920 : * cache itself. An exception is the indexes on pg_am, which we don't use
921 : * (cf. IndexScanOK).
922 : */
923 : void
924 1155 : InitCatCachePhase2(CatCache *cache, bool touch_index)
925 : {
926 1155 : if (cache->cc_tupdesc == NULL)
927 1082 : CatalogCacheInitializeCache(cache);
928 :
929 2310 : if (touch_index &&
930 2295 : cache->id != AMOID &&
931 1140 : cache->id != AMNAME)
932 : {
933 : Relation idesc;
934 :
935 : /*
936 : * We must lock the underlying catalog before opening the index to
937 : * avoid deadlock, since index_open could possibly result in reading
938 : * this same catalog, and if anyone else is exclusive-locking this
939 : * catalog and index they'll be doing it in that order.
940 : */
941 1125 : LockRelationOid(cache->cc_reloid, AccessShareLock);
942 1125 : idesc = index_open(cache->cc_indexoid, AccessShareLock);
943 :
944 : /*
945 : * While we've got the index open, let's check that it's unique (and
946 : * not just deferrable-unique, thank you very much). This is just to
947 : * catch thinkos in definitions of new catcaches, so we don't worry
948 : * about the pg_am indexes not getting tested.
949 : */
950 1125 : Assert(idesc->rd_index->indisunique &&
951 : idesc->rd_index->indimmediate);
952 :
953 1125 : index_close(idesc, AccessShareLock);
954 1125 : UnlockRelationOid(cache->cc_reloid, AccessShareLock);
955 : }
956 1155 : }
957 :
958 :
959 : /*
960 : * IndexScanOK
961 : *
962 : * This function checks for tuples that will be fetched by
963 : * IndexSupportInitialize() during relcache initialization for
964 : * certain system indexes that support critical syscaches.
965 : * We can't use an indexscan to fetch these, else we'll get into
966 : * infinite recursion. A plain heap scan will work, however.
967 : * Once we have completed relcache initialization (signaled by
968 : * criticalRelcachesBuilt), we don't have to worry anymore.
969 : *
970 : * Similarly, during backend startup we have to be able to use the
971 : * pg_authid and pg_auth_members syscaches for authentication even if
972 : * we don't yet have relcache entries for those catalogs' indexes.
973 : */
974 : static bool
975 130361 : IndexScanOK(CatCache *cache, ScanKey cur_skey)
976 : {
977 130361 : switch (cache->id)
978 : {
979 : case INDEXRELID:
980 :
981 : /*
982 : * Rather than tracking exactly which indexes have to be loaded
983 : * before we can use indexscans (which changes from time to time),
984 : * just force all pg_index searches to be heap scans until we've
985 : * built the critical relcaches.
986 : */
987 7529 : if (!criticalRelcachesBuilt)
988 232 : return false;
989 7297 : break;
990 :
991 : case AMOID:
992 : case AMNAME:
993 :
994 : /*
995 : * Always do heap scans in pg_am, because it's so small there's
996 : * not much point in an indexscan anyway. We *must* do this when
997 : * initially building critical relcache entries, but we might as
998 : * well just always do it.
999 : */
1000 627 : return false;
1001 :
1002 : case AUTHNAME:
1003 : case AUTHOID:
1004 : case AUTHMEMMEMROLE:
1005 :
1006 : /*
1007 : * Protect authentication lookups occurring before relcache has
1008 : * collected entries for shared indexes.
1009 : */
1010 1452 : if (!criticalSharedRelcachesBuilt)
1011 2 : return false;
1012 1450 : break;
1013 :
1014 : default:
1015 120753 : break;
1016 : }
1017 :
1018 : /* Normal case, allow index scan */
1019 129500 : return true;
1020 : }
1021 :
1022 : /*
1023 : * SearchCatCache
1024 : *
1025 : * This call searches a system cache for a tuple, opening the relation
1026 : * if necessary (on the first access to a particular cache).
1027 : *
1028 : * The result is NULL if not found, or a pointer to a HeapTuple in
1029 : * the cache. The caller must not modify the tuple, and must call
1030 : * ReleaseCatCache() when done with it.
1031 : *
1032 : * The search key values should be expressed as Datums of the key columns'
1033 : * datatype(s). (Pass zeroes for any unused parameters.) As a special
1034 : * exception, the passed-in key for a NAME column can be just a C string;
1035 : * the caller need not go to the trouble of converting it to a fully
1036 : * null-padded NAME.
1037 : */
1038 : HeapTuple
1039 2138005 : SearchCatCache(CatCache *cache,
1040 : Datum v1,
1041 : Datum v2,
1042 : Datum v3,
1043 : Datum v4)
1044 : {
1045 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1046 : uint32 hashValue;
1047 : Index hashIndex;
1048 : dlist_iter iter;
1049 : dlist_head *bucket;
1050 : CatCTup *ct;
1051 : Relation relation;
1052 : SysScanDesc scandesc;
1053 : HeapTuple ntp;
1054 :
1055 : /* Make sure we're in an xact, even if this ends up being a cache hit */
1056 2138005 : Assert(IsTransactionState());
1057 :
1058 : /*
1059 : * one-time startup overhead for each cache
1060 : */
1061 2138005 : if (cache->cc_tupdesc == NULL)
1062 5780 : CatalogCacheInitializeCache(cache);
1063 :
1064 : #ifdef CATCACHE_STATS
1065 : cache->cc_searches++;
1066 : #endif
1067 :
1068 : /*
1069 : * initialize the search key information
1070 : */
1071 2138005 : memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1072 2138005 : cur_skey[0].sk_argument = v1;
1073 2138005 : cur_skey[1].sk_argument = v2;
1074 2138005 : cur_skey[2].sk_argument = v3;
1075 2138005 : cur_skey[3].sk_argument = v4;
1076 :
1077 : /*
1078 : * find the hash bucket in which to look for the tuple
1079 : */
1080 2138005 : hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1081 2138005 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1082 :
1083 : /*
1084 : * scan the hash bucket until we find a match or exhaust our tuples
1085 : *
1086 : * Note: it's okay to use dlist_foreach here, even though we modify the
1087 : * dlist within the loop, because we don't continue the loop afterwards.
1088 : */
1089 2138005 : bucket = &cache->cc_bucket[hashIndex];
1090 2299302 : dlist_foreach(iter, bucket)
1091 : {
1092 : bool res;
1093 :
1094 2178946 : ct = dlist_container(CatCTup, cache_elem, iter.cur);
1095 :
1096 2178946 : if (ct->dead)
1097 0 : continue; /* ignore dead entries */
1098 :
1099 2178946 : if (ct->hash_value != hashValue)
1100 161297 : continue; /* quickly skip entry if wrong hash val */
1101 :
1102 : /*
1103 : * see if the cached tuple matches our key.
1104 : */
1105 2017649 : HeapKeyTest(&ct->tuple,
1106 : cache->cc_tupdesc,
1107 : cache->cc_nkeys,
1108 : cur_skey,
1109 : res);
1110 2017649 : if (!res)
1111 0 : continue;
1112 :
1113 : /*
1114 : * We found a match in the cache. Move it to the front of the list
1115 : * for its hashbucket, in order to speed subsequent searches. (The
1116 : * most frequently accessed elements in any hashbucket will tend to be
1117 : * near the front of the hashbucket's list.)
1118 : */
1119 2017649 : dlist_move_head(bucket, &ct->cache_elem);
1120 :
1121 : /*
1122 : * If it's a positive entry, bump its refcount and return it. If it's
1123 : * negative, we can report failure to the caller.
1124 : */
1125 2017649 : if (!ct->negative)
1126 : {
1127 1862718 : ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1128 1862718 : ct->refcount++;
1129 1862718 : ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1130 :
1131 : CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1132 : cache->cc_relname, hashIndex);
1133 :
1134 : #ifdef CATCACHE_STATS
1135 : cache->cc_hits++;
1136 : #endif
1137 :
1138 1862718 : return &ct->tuple;
1139 : }
1140 : else
1141 : {
1142 : CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1143 : cache->cc_relname, hashIndex);
1144 :
1145 : #ifdef CATCACHE_STATS
1146 : cache->cc_neg_hits++;
1147 : #endif
1148 :
1149 154931 : return NULL;
1150 : }
1151 : }
1152 :
1153 : /*
1154 : * Tuple was not found in cache, so we have to try to retrieve it directly
1155 : * from the relation. If found, we will add it to the cache; if not
1156 : * found, we will add a negative cache entry instead.
1157 : *
1158 : * NOTE: it is possible for recursive cache lookups to occur while reading
1159 : * the relation --- for example, due to shared-cache-inval messages being
1160 : * processed during heap_open(). This is OK. It's even possible for one
1161 : * of those lookups to find and enter the very same tuple we are trying to
1162 : * fetch here. If that happens, we will enter a second copy of the tuple
1163 : * into the cache. The first copy will never be referenced again, and
1164 : * will eventually age out of the cache, so there's no functional problem.
1165 : * This case is rare enough that it's not worth expending extra cycles to
1166 : * detect.
1167 : */
1168 120356 : relation = heap_open(cache->cc_reloid, AccessShareLock);
1169 :
1170 240712 : scandesc = systable_beginscan(relation,
1171 : cache->cc_indexoid,
1172 120356 : IndexScanOK(cache, cur_skey),
1173 : NULL,
1174 : cache->cc_nkeys,
1175 : cur_skey);
1176 :
1177 120356 : ct = NULL;
1178 :
1179 120356 : while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1180 : {
1181 78139 : ct = CatalogCacheCreateEntry(cache, ntp,
1182 : hashValue, hashIndex,
1183 : false);
1184 : /* immediately set the refcount to 1 */
1185 78139 : ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1186 78139 : ct->refcount++;
1187 78139 : ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1188 78139 : break; /* assume only one match */
1189 : }
1190 :
1191 120356 : systable_endscan(scandesc);
1192 :
1193 120356 : heap_close(relation, AccessShareLock);
1194 :
1195 : /*
1196 : * If tuple was not found, we need to build a negative cache entry
1197 : * containing a fake tuple. The fake tuple has the correct key columns,
1198 : * but nulls everywhere else.
1199 : *
1200 : * In bootstrap mode, we don't build negative entries, because the cache
1201 : * invalidation mechanism isn't alive and can't clear them if the tuple
1202 : * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1203 : * cache inval for that.)
1204 : */
1205 120356 : if (ct == NULL)
1206 : {
1207 42217 : if (IsBootstrapProcessingMode())
1208 345 : return NULL;
1209 :
1210 41872 : ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
1211 41872 : ct = CatalogCacheCreateEntry(cache, ntp,
1212 : hashValue, hashIndex,
1213 : true);
1214 41872 : heap_freetuple(ntp);
1215 :
1216 : CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1217 : cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1218 : CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1219 : cache->cc_relname, hashIndex);
1220 :
1221 : /*
1222 : * We are not returning the negative entry to the caller, so leave its
1223 : * refcount zero.
1224 : */
1225 :
1226 41872 : return NULL;
1227 : }
1228 :
1229 : CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1230 : cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1231 : CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1232 : cache->cc_relname, hashIndex);
1233 :
1234 : #ifdef CATCACHE_STATS
1235 : cache->cc_newloads++;
1236 : #endif
1237 :
1238 78139 : return &ct->tuple;
1239 : }
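
/*
 * A minimal caller sketch. Real callers normally go through syscache.c's
 * SearchSysCache wrappers; the cache pointer and single-OID key here are
 * assumptions for illustration.
 */
#ifdef NOT_USED
static bool
example_lookup(CatCache *cache, Oid objectId)
{
	HeapTuple	tup;

	tup = SearchCatCache(cache, ObjectIdGetDatum(objectId), 0, 0, 0);
	if (!HeapTupleIsValid(tup))
		return false;			/* no match (possibly a negative entry) */
	/* ... inspect the tuple here; it must not be modified ... */
	ReleaseCatCache(tup);		/* release the refcount SearchCatCache took */
	return true;
}
#endif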
1240 :
1241 : /*
1242 : * ReleaseCatCache
1243 : *
1244 : * Decrement the reference count of a catcache entry (releasing the
1245 : * hold grabbed by a successful SearchCatCache).
1246 : *
1247 : * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1248 : * will be freed as soon as their refcount goes to zero. In combination
1249 : * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1250 : * to catch references to already-released catcache entries.
1251 : */
1252 : void
1253 1940857 : ReleaseCatCache(HeapTuple tuple)
1254 : {
1255 1940857 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
1256 : offsetof(CatCTup, tuple));
1257 :
1258 : /* Safety checks to ensure we were handed a cache entry */
1259 1940857 : Assert(ct->ct_magic == CT_MAGIC);
1260 1940857 : Assert(ct->refcount > 0);
1261 :
1262 1940857 : ct->refcount--;
1263 1940857 : ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1264 :
1265 1940857 : if (
1266 : #ifndef CATCACHE_FORCE_RELEASE
1267 1940924 : ct->dead &&
1268 : #endif
1269 111 : ct->refcount == 0 &&
1270 44 : (ct->c_list == NULL || ct->c_list->refcount == 0))
1271 44 : CatCacheRemoveCTup(ct->my_cache, ct);
1272 1940857 : }
1273 :
1274 :
1275 : /*
1276 : * GetCatCacheHashValue
1277 : *
1278 : * Compute the hash value for a given set of search keys.
1279 : *
1280 : * The reason for exposing this as part of the API is that the hash value is
1281 : * exposed in cache invalidation operations, so there are places outside the
1282 : * catcache code that need to be able to compute the hash values.
1283 : */
1284 : uint32
1285 5943 : GetCatCacheHashValue(CatCache *cache,
1286 : Datum v1,
1287 : Datum v2,
1288 : Datum v3,
1289 : Datum v4)
1290 : {
1291 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1292 :
1293 : /*
1294 : * one-time startup overhead for each cache
1295 : */
1296 5943 : if (cache->cc_tupdesc == NULL)
1297 0 : CatalogCacheInitializeCache(cache);
1298 :
1299 : /*
1300 : * initialize the search key information
1301 : */
1302 5943 : memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1303 5943 : cur_skey[0].sk_argument = v1;
1304 5943 : cur_skey[1].sk_argument = v2;
1305 5943 : cur_skey[2].sk_argument = v3;
1306 5943 : cur_skey[3].sk_argument = v4;
1307 :
1308 : /*
1309 : * calculate the hash value
1310 : */
1311 5943 : return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1312 : }
1313 :
1314 :
1315 : /*
1316 : * SearchCatCacheList
1317 : *
1318 : * Generate a list of all tuples matching a partial key (that is,
1319 : * a key specifying just the first K of the cache's N key columns).
1320 : *
1321 : * The caller must not modify the list object or the pointed-to tuples,
1322 : * and must call ReleaseCatCacheList() when done with the list.
1323 : */
1324 : CatCList *
1325 101636 : SearchCatCacheList(CatCache *cache,
1326 : int nkeys,
1327 : Datum v1,
1328 : Datum v2,
1329 : Datum v3,
1330 : Datum v4)
1331 : {
1332 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1333 : uint32 lHashValue;
1334 : dlist_iter iter;
1335 : CatCList *cl;
1336 : CatCTup *ct;
1337 : List *volatile ctlist;
1338 : ListCell *ctlist_item;
1339 : int nmembers;
1340 : bool ordered;
1341 : HeapTuple ntp;
1342 : MemoryContext oldcxt;
1343 : int i;
1344 :
1345 : /*
1346 : * one-time startup overhead for each cache
1347 : */
1348 101636 : if (cache->cc_tupdesc == NULL)
1349 579 : CatalogCacheInitializeCache(cache);
1350 :
1351 101636 : Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1352 :
1353 : #ifdef CATCACHE_STATS
1354 : cache->cc_lsearches++;
1355 : #endif
1356 :
1357 : /*
1358 : * initialize the search key information
1359 : */
1360 101636 : memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1361 101636 : cur_skey[0].sk_argument = v1;
1362 101636 : cur_skey[1].sk_argument = v2;
1363 101636 : cur_skey[2].sk_argument = v3;
1364 101636 : cur_skey[3].sk_argument = v4;
1365 :
1366 : /*
1367 : * compute a hash value of the given keys for faster search. We don't
1368 : * presently divide the CatCList items into buckets, but this still lets
1369 : * us skip non-matching items quickly most of the time.
1370 : */
1371 101636 : lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
1372 :
1373 : /*
1374 : * scan the items until we find a match or exhaust our list
1375 : *
1376 : * Note: it's okay to use dlist_foreach here, even though we modify the
1377 : * dlist within the loop, because we don't continue the loop afterwards.
1378 : */
1379 338117 : dlist_foreach(iter, &cache->cc_lists)
1380 : {
1381 : bool res;
1382 :
1383 328112 : cl = dlist_container(CatCList, cache_elem, iter.cur);
1384 :
1385 328112 : if (cl->dead)
1386 0 : continue; /* ignore dead entries */
1387 :
1388 328112 : if (cl->hash_value != lHashValue)
1389 236481 : continue; /* quickly skip entry if wrong hash val */
1390 :
1391 : /*
1392 : * see if the cached list matches our key.
1393 : */
1394 91631 : if (cl->nkeys != nkeys)
1395 0 : continue;
1396 91631 : HeapKeyTest(&cl->tuple,
1397 : cache->cc_tupdesc,
1398 : nkeys,
1399 : cur_skey,
1400 : res);
1401 91631 : if (!res)
1402 0 : continue;
1403 :
1404 : /*
1405 : * We found a matching list. Move the list to the front of the
1406 : * cache's list-of-lists, to speed subsequent searches. (We do not
1407 : * move the members to the fronts of their hashbucket lists, however,
1408 : * since there's no point in that unless they are searched for
1409 : * individually.)
1410 : */
1411 91631 : dlist_move_head(&cache->cc_lists, &cl->cache_elem);
1412 :
1413 : /* Bump the list's refcount and return it */
1414 91631 : ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1415 91631 : cl->refcount++;
1416 91631 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1417 :
1418 : CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1419 : cache->cc_relname);
1420 :
1421 : #ifdef CATCACHE_STATS
1422 : cache->cc_lhits++;
1423 : #endif
1424 :
1425 91631 : return cl;
1426 : }
1427 :
1428 : /*
1429 : * List was not found in cache, so we have to build it by reading the
1430 : * relation. For each matching tuple found in the relation, use an
1431 : * existing cache entry if possible, else build a new one.
1432 : *
1433 : * We have to bump the member refcounts temporarily to ensure they won't
1434 : * get dropped from the cache while loading other members. We use a PG_TRY
1435 : * block to ensure we can undo those refcounts if we get an error before
1436 : * we finish constructing the CatCList.
1437 : */
1438 10005 : ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1439 :
1440 10005 : ctlist = NIL;
1441 :
1442 10005 : PG_TRY();
1443 : {
1444 : Relation relation;
1445 : SysScanDesc scandesc;
1446 :
1447 10005 : relation = heap_open(cache->cc_reloid, AccessShareLock);
1448 :
1449 10005 : scandesc = systable_beginscan(relation,
1450 : cache->cc_indexoid,
1451 10005 : IndexScanOK(cache, cur_skey),
1452 : NULL,
1453 : nkeys,
1454 : cur_skey);
1455 :
1456 : /* The list will be ordered iff we are doing an index scan */
1457 10005 : ordered = (scandesc->irel != NULL);
1458 :
1459 51714 : while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1460 : {
1461 : uint32 hashValue;
1462 : Index hashIndex;
1463 31704 : bool found = false;
1464 : dlist_head *bucket;
1465 :
1466 : /*
1467 : * See if there's an entry for this tuple already.
1468 : */
1469 31704 : ct = NULL;
1470 31704 : hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1471 31704 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1472 :
1473 31704 : bucket = &cache->cc_bucket[hashIndex];
1474 45019 : dlist_foreach(iter, bucket)
1475 : {
1476 19555 : ct = dlist_container(CatCTup, cache_elem, iter.cur);
1477 :
1478 19555 : if (ct->dead || ct->negative)
1479 66 : continue; /* ignore dead and negative entries */
1480 :
1481 19489 : if (ct->hash_value != hashValue)
1482 12604 : continue; /* quickly skip entry if wrong hash val */
1483 :
1484 6885 : if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1485 0 : continue; /* not same tuple */
1486 :
1487 : /*
1488 : * Found a match, but can't use it if it belongs to another
1489 : * list already
1490 : */
1491 6885 : if (ct->c_list)
1492 645 : continue;
1493 :
1494 6240 : found = true;
1495 6240 : break; /* A-OK */
1496 : }
1497 :
1498 31704 : if (!found)
1499 : {
1500 : /* We didn't find a usable entry, so make a new one */
1501 25464 : ct = CatalogCacheCreateEntry(cache, ntp,
1502 : hashValue, hashIndex,
1503 : false);
1504 : }
1505 :
1506 : /* Careful here: add entry to ctlist, then bump its refcount */
1507 : /* This way leaves state correct if lappend runs out of memory */
1508 31704 : ctlist = lappend(ctlist, ct);
1509 31704 : ct->refcount++;
1510 : }
1511 :
1512 10005 : systable_endscan(scandesc);
1513 :
1514 10005 : heap_close(relation, AccessShareLock);
1515 :
1516 : /*
1517 : * Now we can build the CatCList entry. First we need a dummy tuple
1518 : * containing the key values...
1519 : */
1520 10005 : ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1521 10005 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1522 10005 : nmembers = list_length(ctlist);
1523 10005 : cl = (CatCList *)
1524 10005 : palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
1525 10005 : heap_copytuple_with_tuple(ntp, &cl->tuple);
1526 10005 : MemoryContextSwitchTo(oldcxt);
1527 10005 : heap_freetuple(ntp);
1528 :
1529 : /*
1530 : * We are now past the last thing that could trigger an elog before we
1531 : * have finished building the CatCList and remembering it in the
1532 : * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1533 : * we'd better do so before we start marking the members as belonging
1534 : * to the list.
1535 : */
1536 :
1537 : }
1538 0 : PG_CATCH();
1539 : {
1540 0 : foreach(ctlist_item, ctlist)
1541 : {
1542 0 : ct = (CatCTup *) lfirst(ctlist_item);
1543 0 : Assert(ct->c_list == NULL);
1544 0 : Assert(ct->refcount > 0);
1545 0 : ct->refcount--;
1546 0 : if (
1547 : #ifndef CATCACHE_FORCE_RELEASE
1548 0 : ct->dead &&
1549 : #endif
1550 0 : ct->refcount == 0 &&
1551 0 : (ct->c_list == NULL || ct->c_list->refcount == 0))
1552 0 : CatCacheRemoveCTup(cache, ct);
1553 : }
1554 :
1555 0 : PG_RE_THROW();
1556 : }
1557 10005 : PG_END_TRY();
1558 :
1559 10005 : cl->cl_magic = CL_MAGIC;
1560 10005 : cl->my_cache = cache;
1561 10005 : cl->refcount = 0; /* for the moment */
1562 10005 : cl->dead = false;
1563 10005 : cl->ordered = ordered;
1564 10005 : cl->nkeys = nkeys;
1565 10005 : cl->hash_value = lHashValue;
1566 10005 : cl->n_members = nmembers;
1567 :
1568 10005 : i = 0;
1569 41709 : foreach(ctlist_item, ctlist)
1570 : {
1571 31704 : cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1572 31704 : Assert(ct->c_list == NULL);
1573 31704 : ct->c_list = cl;
1574 : /* release the temporary refcount on the member */
1575 31704 : Assert(ct->refcount > 0);
1576 31704 : ct->refcount--;
1577 : /* mark list dead if any members already dead */
1578 31704 : if (ct->dead)
1579 0 : cl->dead = true;
1580 : }
1581 10005 : Assert(i == nmembers);
1582 :
1583 10005 : dlist_push_head(&cache->cc_lists, &cl->cache_elem);
1584 :
1585 : /* Finally, bump the list's refcount and return it */
1586 10005 : cl->refcount++;
1587 10005 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1588 :
1589 : CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1590 : cache->cc_relname, nmembers);
1591 :
1592 10005 : return cl;
1593 : }
1594 :
1595 : /*
1596 : * ReleaseCatCacheList
1597 : *
1598 : * Decrement the reference count of a catcache list.
1599 : */
1600 : void
1601 101636 : ReleaseCatCacheList(CatCList *list)
1602 : {
1603 : /* Safety checks to ensure we were handed a cache entry */
1604 101636 : Assert(list->cl_magic == CL_MAGIC);
1605 101636 : Assert(list->refcount > 0);
1606 101636 : list->refcount--;
1607 101636 : ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
1608 :
1609 101636 : if (
1610 : #ifndef CATCACHE_FORCE_RELEASE
1611 101637 : list->dead &&
1612 : #endif
1613 1 : list->refcount == 0)
1614 1 : CatCacheRemoveCList(list->my_cache, list);
1615 101636 : }
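
/*
 * A caller sketch for partial-key searches (cf. syscache.c's
 * SearchSysCacheList). As noted for SearchCatCache above, a NAME key may
 * be passed as a plain C string; the one-key lookup is an assumption for
 * illustration.
 */
#ifdef NOT_USED
static void
example_list_scan(CatCache *cache, const char *name)
{
	CatCList   *catlist;
	int			i;

	catlist = SearchCatCacheList(cache, 1, CStringGetDatum(name), 0, 0, 0);
	for (i = 0; i < catlist->n_members; i++)
	{
		HeapTuple	tup = &catlist->members[i]->tuple;

		/* ... read tup; do not modify or free it ... */
	}
	ReleaseCatCacheList(catlist);
}
#endif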
1616 :
1617 :
1618 : /*
1619 : * CatalogCacheCreateEntry
1620 : * Create a new CatCTup entry, copying the given HeapTuple and other
1621 : * supplied data into it. The new entry initially has refcount 0.
1622 : */
1623 : static CatCTup *
1624 145475 : CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
1625 : uint32 hashValue, Index hashIndex, bool negative)
1626 : {
1627 : CatCTup *ct;
1628 : HeapTuple dtp;
1629 : MemoryContext oldcxt;
1630 :
1631 : /*
1632 : * If there are any out-of-line toasted fields in the tuple, expand them
1633 : * in-line. This saves cycles during later use of the catcache entry, and
1634 : * also protects us against the possibility of the toast tuples being
1635 : * freed before we attempt to fetch them, in case of something using a
1636 : * slightly stale catcache entry.
1637 : */
1638 145475 : if (HeapTupleHasExternal(ntp))
1639 92 : dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
1640 : else
1641 145383 : dtp = ntp;
1642 :
1643 : /*
1644 : * Allocate CatCTup header in cache memory, and copy the tuple there too.
1645 : */
1646 145475 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1647 145475 : ct = (CatCTup *) palloc(sizeof(CatCTup));
1648 145475 : heap_copytuple_with_tuple(dtp, &ct->tuple);
1649 145475 : MemoryContextSwitchTo(oldcxt);
1650 :
1651 145475 : if (dtp != ntp)
1652 92 : heap_freetuple(dtp);
1653 :
1654 : /*
1655 : * Finish initializing the CatCTup header, and add it to the cache's
1656 : * linked list and counts.
1657 : */
1658 145475 : ct->ct_magic = CT_MAGIC;
1659 145475 : ct->my_cache = cache;
1660 145475 : ct->c_list = NULL;
1661 145475 : ct->refcount = 0; /* for the moment */
1662 145475 : ct->dead = false;
1663 145475 : ct->negative = negative;
1664 145475 : ct->hash_value = hashValue;
1665 :
1666 145475 : dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1667 :
1668 145475 : cache->cc_ntup++;
1669 145475 : CacheHdr->ch_ntup++;
1670 :
1671 : /*
1672 : * If the hash table has become too full, enlarge the buckets array. Quite
1673 : * arbitrarily, we enlarge when fill factor > 2.
1674 : */
1675 145475 : if (cache->cc_ntup > cache->cc_nbuckets * 2)
1676 111 : RehashCatCache(cache);
1677 :
1678 145475 : return ct;
1679 : }
1680 :
1681 : /*
1682 : * build_dummy_tuple
1683 : * Generate a palloc'd HeapTuple that contains the specified key
1684 : * columns, and NULLs for other columns.
1685 : *
1686 : * This is used to store the keys for negative cache entries and CatCList
1687 : * entries, which don't have real tuples associated with them.
1688 : */
1689 : static HeapTuple
1690 51877 : build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1691 : {
1692 : HeapTuple ntp;
1693 51877 : TupleDesc tupDesc = cache->cc_tupdesc;
1694 : Datum *values;
1695 : bool *nulls;
1696 51877 : Oid tupOid = InvalidOid;
1697 : NameData tempNames[4];
1698 : int i;
1699 :
1700 51877 : values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1701 51877 : nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
1702 :
1703 51877 : memset(values, 0, tupDesc->natts * sizeof(Datum));
1704 51877 : memset(nulls, true, tupDesc->natts * sizeof(bool));
1705 :
1706 157303 : for (i = 0; i < nkeys; i++)
1707 : {
1708 105426 : int attindex = cache->cc_key[i];
1709 105426 : Datum keyval = skeys[i].sk_argument;
1710 :
1711 105426 : if (attindex > 0)
1712 : {
1713 : /*
1714 : * Here we must be careful in case the caller passed a C string
1715 : * where a NAME is wanted: convert the given argument to a
1716 : * correctly padded NAME. Otherwise the memcpy() done in
1717 : * heap_form_tuple could fall off the end of memory.
1718 : */
1719 105397 : if (cache->cc_isname[i])
1720 : {
1721 25795 : Name newval = &tempNames[i];
1722 :
1723 25795 : namestrcpy(newval, DatumGetCString(keyval));
1724 25795 : keyval = NameGetDatum(newval);
1725 : }
1726 105397 : values[attindex - 1] = keyval;
1727 105397 : nulls[attindex - 1] = false;
1728 : }
1729 : else
1730 : {
1731 29 : Assert(attindex == ObjectIdAttributeNumber);
1732 29 : tupOid = DatumGetObjectId(keyval);
1733 : }
1734 : }
1735 :
1736 51877 : ntp = heap_form_tuple(tupDesc, values, nulls);
1737 51877 : if (tupOid != InvalidOid)
1738 8 : HeapTupleSetOid(ntp, tupOid);
1739 :
1740 51877 : pfree(values);
1741 51877 : pfree(nulls);
1742 :
1743 51877 : return ntp;
1744 : }
1745 :
1746 :
1747 : /*
1748 : * PrepareToInvalidateCacheTuple()
1749 : *
1750 : * This is part of a rather subtle chain of events, so pay attention:
1751 : *
1752 : * When a tuple is inserted or deleted, it cannot be flushed from the
1753 : * catcaches immediately, for reasons explained at the top of cache/inval.c.
1754 : * Instead we have to add entry(s) for the tuple to a list of pending tuple
1755 : * invalidations that will be done at the end of the command or transaction.
1756 : *
1757 : * The lists of tuples that need to be flushed are kept by inval.c. This
1758 : * routine is a helper routine for inval.c. Given a tuple belonging to
1759 : * the specified relation, find all catcaches it could be in, compute the
1760 : * correct hash value for each such catcache, and call the specified
1761 : * function to record the cache id and hash value in inval.c's lists.
1762 : * SysCacheInvalidate will be called later, if appropriate,
1763 : * using the recorded information.
1764 : *
1765 : * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1766 : * For an update, we are called just once, with tuple being the old tuple
1767 : * version and newtuple the new version. We should make two list entries
1768 : * if the tuple's hash value changed, but only one if it didn't.
1769 : *
1770 : * Note that it is irrelevant whether the given tuple is actually loaded
1771 : * into the catcache at the moment. Even if it's not there now, it might
1772 : * be by the end of the command, or there might be a matching negative entry
1773 : * to flush --- or other backends' caches might have such entries --- so
1774 : * we have to make list entries to flush it later.
1775 : *
1776 : * Also note that it's not an error if there are no catcaches for the
1777 : * specified relation. inval.c doesn't know exactly which rels have
1778 : * catcaches --- it will call this routine for any tuple that's in a
1779 : * system relation.
1780 : */
1781 : void
1782 81982 : PrepareToInvalidateCacheTuple(Relation relation,
1783 : HeapTuple tuple,
1784 : HeapTuple newtuple,
1785 : void (*function) (int, uint32, Oid))
1786 : {
1787 : slist_iter iter;
1788 : Oid reloid;
1789 :
1790 : CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
1791 :
1792 : /*
1793 : * sanity checks
1794 : */
1795 81982 : Assert(RelationIsValid(relation));
1796 81982 : Assert(HeapTupleIsValid(tuple));
1797 81982 : Assert(PointerIsValid(function));
1798 81982 : Assert(CacheHdr != NULL);
1799 :
1800 81982 : reloid = RelationGetRelid(relation);
1801 :
1802 : /* ----------------
1803 : * for each cache
1804 : * if the cache contains tuples from the specified relation
1805 : * compute the tuple's hash value(s) in this cache,
1806 : * and call the passed function to register the information.
1807 : * ----------------
1808 : */
1809 :
1810 6394596 : slist_foreach(iter, &CacheHdr->ch_caches)
1811 : {
1812 6312614 : CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
1813 : uint32 hashvalue;
1814 : Oid dbid;
1815 :
1816 6312614 : if (ccp->cc_reloid != reloid)
1817 6165267 : continue;
1818 :
1819 : /* Just in case cache hasn't finished initialization yet... */
1820 147347 : if (ccp->cc_tupdesc == NULL)
1821 637 : CatalogCacheInitializeCache(ccp);
1822 :
1823 147347 : hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
1824 147347 : dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
1825 :
1826 147347 : (*function) (ccp->id, hashvalue, dbid);
1827 :
1828 147347 : if (newtuple)
1829 : {
1830 : uint32 newhashvalue;
1831 :
1832 11111 : newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);
1833 :
1834 11111 : if (newhashvalue != hashvalue)
1835 456 : (*function) (ccp->id, newhashvalue, dbid);
1836 : }
1837 : }
1838 81982 : }
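
/*
 * Sketch of the callback contract: inval.c passes its
 * RegisterCatcacheInvalidation, which has this shape and records the
 * (cache id, hash value, database id) triple for replay at end of
 * command or transaction.
 */
#ifdef NOT_USED
static void
example_record_inval(int cacheId, uint32 hashValue, Oid dbId)
{
	/* ... append to a pending-invalidations list ... */
}
#endif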
1839 :
1840 :
1841 : /*
1842 : * Subroutines for warning about reference leaks. These are exported so
1843 : * that resowner.c can call them.
1844 : */
1845 : void
1846 0 : PrintCatCacheLeakWarning(HeapTuple tuple)
1847 : {
1848 0 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
1849 : offsetof(CatCTup, tuple));
1850 :
1851 : /* Safety check to ensure we were handed a cache entry */
1852 0 : Assert(ct->ct_magic == CT_MAGIC);
1853 :
1854 0 : elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1855 : ct->my_cache->cc_relname, ct->my_cache->id,
1856 : ItemPointerGetBlockNumber(&(tuple->t_self)),
1857 : ItemPointerGetOffsetNumber(&(tuple->t_self)),
1858 : ct->refcount);
1859 0 : }
1860 :
1861 : void
1862 0 : PrintCatCacheListLeakWarning(CatCList *list)
1863 : {
1864 0 : elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1865 : list->my_cache->cc_relname, list->my_cache->id,
1866 : list, list->refcount);
1867 0 : }