/*-------------------------------------------------------------------------
 *
 * inval.c
 *	  POSTGRES cache invalidation dispatcher code.
 *
 * This is subtle stuff, so pay attention:
 *
 * When a tuple is updated or deleted, our standard time qualification rules
 * consider that it is *still valid* so long as we are in the same command,
 * ie, until the next CommandCounterIncrement() or transaction commit.
 * (See utils/time/tqual.c, and note that system catalogs are generally
 * scanned under the most current snapshot available, rather than the
 * transaction snapshot.)  At the command boundary, the old tuple stops
 * being valid and the new version, if any, becomes valid.  Therefore,
 * we cannot simply flush a tuple from the system caches during heap_update()
 * or heap_delete().  The tuple is still good at that point; what's more,
 * even if we did flush it, it might be reloaded into the caches by a later
 * request in the same command.  So the correct behavior is to keep a list
 * of outdated (updated/deleted) tuples and then do the required cache
 * flushes at the next command boundary.  We must also keep track of
 * inserted tuples so that we can flush "negative" cache entries that match
 * the new tuples; again, that mustn't happen until end of command.
 *
 * Once we have finished the command, we still need to remember inserted
 * tuples (including new versions of updated tuples), so that we can flush
 * them from the caches if we abort the transaction.  Similarly, we'd better
 * be able to flush "negative" cache entries that may have been loaded in
 * place of deleted tuples, so we still need the deleted ones too.
 *
 * If we successfully complete the transaction, we have to broadcast all
 * these invalidation events to other backends (via the SI message queue)
 * so that they can flush obsolete entries from their caches.  Note we have
 * to record the transaction commit before sending SI messages, otherwise
 * the other backends won't see our updated tuples as good.
 *
 * When a subtransaction aborts, we can process and discard any events
 * it has queued.  When a subtransaction commits, we just add its events
 * to the pending lists of the parent transaction.
 *
 * In short, we need to remember until xact end every insert or delete
 * of a tuple that might be in the system caches.  Updates are treated as
 * two events, delete + insert, for simplicity.  (If the update doesn't
 * change the tuple hash value, catcache.c optimizes this into one event.)
 *
 * We do not need to register EVERY tuple operation in this way, just those
 * on tuples in relations that have associated catcaches.  We do, however,
 * have to register every operation on every tuple that *could* be in a
 * catcache, whether or not it currently is in our cache.  Also, if the
 * tuple is in a relation that has multiple catcaches, we need to register
 * an invalidation message for each such catcache.  catcache.c's
 * PrepareToInvalidateCacheTuple() routine provides the knowledge of which
 * catcaches may need invalidation for a given tuple.
 *
 * Also, whenever we see an operation on a pg_class, pg_attribute, or
 * pg_index tuple, we register a relcache flush operation for the relation
 * described by that tuple (as specified in CacheInvalidateHeapTuple()).
 *
 * We keep the relcache flush requests in lists separate from the catcache
 * tuple flush requests.  This allows us to issue all the pending catcache
 * flushes before we issue relcache flushes, which saves us from loading
 * a catcache tuple during relcache load only to flush it again right away.
 * Also, we avoid queuing multiple relcache flush requests for the same
 * relation, since a relcache flush is relatively expensive to do.
 * (XXX is it worth testing likewise for duplicate catcache flush entries?
 * Probably not.)
 *
 * If a relcache flush is issued for a system relation that we preload
 * from the relcache init file, we must also delete the init file so that
 * it will be rebuilt during the next backend restart.  The actual work of
 * manipulating the init file is in relcache.c, but we keep track of the
 * need for it here.
 *
 * The request lists proper are kept in CurTransactionContext of their
 * creating (sub)transaction, since they can be forgotten on abort of that
 * transaction but must be kept till top-level commit otherwise.  For
 * simplicity we keep the controlling list-of-lists in TopTransactionContext.
 *
 * Currently, inval messages are sent without regard for the possibility
 * that the object described by the catalog tuple might be a session-local
 * object such as a temporary table.  This is because (1) this code has
 * no practical way to tell the difference, and (2) it is not certain that
 * other backends don't have catalog cache or even relcache entries for
 * such tables, anyway; there is nothing that prevents that.  It might be
 * worth trying to avoid sending such inval traffic in the future, if those
 * problems can be overcome cheaply.
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/cache/inval.c
 *
 *-------------------------------------------------------------------------
 */
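
/*
 * A minimal sketch of that life cycle, assuming a hypothetical catalog
 * update ("rel", "oldtup", and "newtup" are placeholders, not identifiers
 * used elsewhere in this file):
 *
 *		CacheInvalidateHeapTuple(rel, oldtup, newtup);
 *					-- queues the catcache/relcache events
 *		CommandCounterIncrement();
 *					-- CommandEndInvalidationMessages() applies them to
 *					-- our own caches and moves them to the prior-cmds list
 *		CommitTransactionCommand();
 *					-- AtEOXact_Inval(true) broadcasts everything queued
 *					-- via the SI message queue
 */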
#include "postgres.h"

#include <limits.h>

#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "storage/sinval.h"
#include "storage/smgr.h"
#include "utils/catcache.h"
#include "utils/inval.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relmapper.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"


/*
 * To minimize palloc traffic, we keep pending requests in successively-
 * larger chunks (a slightly more sophisticated version of an expansible
 * array).  All request types can be stored as SharedInvalidationMessage
 * records.  The ordering of requests within a list is never significant.
 */
typedef struct InvalidationChunk
{
	struct InvalidationChunk *next;	/* list link */
	int			nitems;			/* # items currently stored in chunk */
	int			maxitems;		/* size of allocated array in this chunk */
	SharedInvalidationMessage msgs[FLEXIBLE_ARRAY_MEMBER];
} InvalidationChunk;

typedef struct InvalidationListHeader
{
	InvalidationChunk *cclist;	/* list of chunks holding catcache msgs */
	InvalidationChunk *rclist;	/* list of chunks holding relcache msgs */
} InvalidationListHeader;
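
/*
 * Worked arithmetic, for orientation (not code): with FIRSTCHUNKSIZE = 32
 * and each new chunk doubling the size of the last, successive chunks hold
 * 32, 64, 128, ... messages, so a list of N messages needs only O(log N)
 * chunks.  Each chunk is allocated as
 *
 *		offsetof(InvalidationChunk, msgs) +
 *			maxitems * sizeof(SharedInvalidationMessage)
 *
 * bytes, as done in AddInvalidationMessage() below.
 */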

/*----------------
 * Invalidation info is divided into two lists:
 *	1) events so far in current command, not yet reflected to caches.
 *	2) events in previous commands of current transaction; these have
 *	   been reflected to local caches, and must be either broadcast to
 *	   other backends or rolled back from local cache when we commit
 *	   or abort the transaction.
 * Actually, we need two such lists for each level of nested transaction,
 * so that we can discard events from an aborted subtransaction.  When
 * a subtransaction commits, we append its lists to the parent's lists.
 *
 * The relcache-file-invalidated flag can just be a simple boolean,
 * since we only act on it at transaction commit; we don't care which
 * command of the transaction set it.
 *----------------
 */
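
/*
 * A hypothetical timeline, to make the two-list scheme concrete ("foo" and
 * "bar" are placeholder tables):
 *
 *		BEGIN;
 *		ALTER TABLE foo ...;	-- events queued in CurrentCmdInvalidMsgs;
 *								-- CommandCounterIncrement() then applies
 *								-- them locally and moves them over to
 *								-- PriorCmdInvalidMsgs
 *		ALTER TABLE bar ...;	-- fresh events in CurrentCmdInvalidMsgs
 *		COMMIT;					-- AtEOXact_Inval(true) broadcasts both lists
 */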

typedef struct TransInvalidationInfo
{
	/* Back link to parent transaction's info */
	struct TransInvalidationInfo *parent;

	/* Subtransaction nesting depth */
	int			my_level;

	/* head of current-command event list */
	InvalidationListHeader CurrentCmdInvalidMsgs;

	/* head of previous-commands event list */
	InvalidationListHeader PriorCmdInvalidMsgs;

	/* init file must be invalidated? */
	bool		RelcacheInitFileInval;
} TransInvalidationInfo;

static TransInvalidationInfo *transInvalInfo = NULL;

static SharedInvalidationMessage *SharedInvalidMessagesArray;
static int	numSharedInvalidMessagesArray;
static int	maxSharedInvalidMessagesArray;


/*
 * Dynamically-registered callback functions.  Current implementation
 * assumes there won't be enough of these to justify a dynamically resizable
 * array; it'd be easy to improve that if needed.
 *
 * To avoid searching in CallSyscacheCallbacks, all callbacks for a given
 * syscache are linked into a list pointed to by syscache_callback_links[id].
 * The link values are syscache_callback_list[] index plus 1, or 0 for none.
 */

#define MAX_SYSCACHE_CALLBACKS 64
#define MAX_RELCACHE_CALLBACKS 10

static struct SYSCACHECALLBACK
{
	int16		id;				/* cache number */
	int16		link;			/* next callback index+1 for same cache */
	SyscacheCallbackFunction function;
	Datum		arg;
} syscache_callback_list[MAX_SYSCACHE_CALLBACKS];

static int16 syscache_callback_links[SysCacheSize];

static int	syscache_callback_count = 0;

static struct RELCACHECALLBACK
{
	RelcacheCallbackFunction function;
	Datum		arg;
} relcache_callback_list[MAX_RELCACHE_CALLBACKS];

static int	relcache_callback_count = 0;

/* ----------------------------------------------------------------
 *				Invalidation list support functions
 *
 * These three routines encapsulate processing of the "chunked"
 * representation of what is logically just a list of messages.
 * ----------------------------------------------------------------
 */

/*
 * AddInvalidationMessage
 *		Add an invalidation message to a list (of chunks).
 *
 * Note that we do not pay any great attention to maintaining the original
 * ordering of the messages.
 */
static void
AddInvalidationMessage(InvalidationChunk **listHdr,
					   SharedInvalidationMessage *msg)
{
	InvalidationChunk *chunk = *listHdr;

	if (chunk == NULL)
	{
		/* First time through; create initial chunk */
#define FIRSTCHUNKSIZE 32
		chunk = (InvalidationChunk *)
			MemoryContextAlloc(CurTransactionContext,
							   offsetof(InvalidationChunk, msgs) +
							   FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage));
		chunk->nitems = 0;
		chunk->maxitems = FIRSTCHUNKSIZE;
		chunk->next = *listHdr;
		*listHdr = chunk;
	}
	else if (chunk->nitems >= chunk->maxitems)
	{
		/* Need another chunk; double size of last chunk */
		int			chunksize = 2 * chunk->maxitems;

		chunk = (InvalidationChunk *)
			MemoryContextAlloc(CurTransactionContext,
							   offsetof(InvalidationChunk, msgs) +
							   chunksize * sizeof(SharedInvalidationMessage));
		chunk->nitems = 0;
		chunk->maxitems = chunksize;
		chunk->next = *listHdr;
		*listHdr = chunk;
	}
	/* Okay, add message to current chunk */
	chunk->msgs[chunk->nitems] = *msg;
	chunk->nitems++;
}

/*
 * Append one list of invalidation message chunks to another, resetting
 * the source chunk-list pointer to NULL.
 */
static void
AppendInvalidationMessageList(InvalidationChunk **destHdr,
							  InvalidationChunk **srcHdr)
{
	InvalidationChunk *chunk = *srcHdr;

	if (chunk == NULL)
		return;					/* nothing to do */

	while (chunk->next != NULL)
		chunk = chunk->next;

	chunk->next = *destHdr;

	*destHdr = *srcHdr;

	*srcHdr = NULL;
}

/*
 * Process a list of invalidation messages.
 *
 * This is a macro that executes the given code fragment for each message in
 * a message chunk list.  The fragment should refer to the message as *msg.
 */
#define ProcessMessageList(listHdr, codeFragment) \
	do { \
		InvalidationChunk *_chunk; \
		for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
		{ \
			int		_cindex; \
			for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
			{ \
				SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
				codeFragment; \
			} \
		} \
	} while (0)

/*
 * Process a list of invalidation messages group-wise.
 *
 * As above, but the code fragment can handle an array of messages.
 * The fragment should refer to the messages as msgs[], with n entries.
 */
#define ProcessMessageListMulti(listHdr, codeFragment) \
	do { \
		InvalidationChunk *_chunk; \
		for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
		{ \
			SharedInvalidationMessage *msgs = _chunk->msgs; \
			int		n = _chunk->nitems; \
			codeFragment; \
		} \
	} while (0)
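
/*
 * A minimal usage sketch for these macros (a hypothetical fragment; "hdr"
 * is a placeholder InvalidationListHeader pointer):
 *
 *		ProcessMessageList(hdr->cclist,
 *						   LocalExecuteInvalidationMessage(msg));
 *
 * expands to a nested loop over every chunk and every message slot,
 * executing the fragment once per message.  ProcessMessageListMulti
 * instead hands the fragment one whole chunk (msgs, n) at a time.
 */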

/* ----------------------------------------------------------------
 *				Invalidation set support functions
 *
 * These routines understand the division of a logical invalidation
 * list into separate physical lists for catcache and relcache entries.
 * ----------------------------------------------------------------
 */

/*
 * Add a catcache inval entry
 */
static void
AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
							   int id, uint32 hashValue, Oid dbId)
{
	SharedInvalidationMessage msg;

	Assert(id < CHAR_MAX);
	msg.cc.id = (int8) id;
	msg.cc.dbId = dbId;
	msg.cc.hashValue = hashValue;

	/*
	 * Mark the padding bytes in SharedInvalidationMessage structs as
	 * defined.  Otherwise the sinvaladt.c ringbuffer, which is accessed by
	 * multiple processes, will cause spurious valgrind warnings about
	 * undefined memory being used.  That's because valgrind remembers the
	 * undefined bytes from the last local process's store, not realizing
	 * that another process has written since, filling the previously
	 * uninitialized bytes.
	 */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	AddInvalidationMessage(&hdr->cclist, &msg);
}

/*
 * Add a whole-catalog inval entry
 */
static void
AddCatalogInvalidationMessage(InvalidationListHeader *hdr,
							  Oid dbId, Oid catId)
{
	SharedInvalidationMessage msg;

	msg.cat.id = SHAREDINVALCATALOG_ID;
	msg.cat.dbId = dbId;
	msg.cat.catId = catId;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	AddInvalidationMessage(&hdr->cclist, &msg);
}

/*
 * Add a relcache inval entry
 */
static void
AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
							   Oid dbId, Oid relId)
{
	SharedInvalidationMessage msg;

	/*
	 * Don't add a duplicate item.  We assume dbId need not be checked
	 * because it will never change.  InvalidOid for relId means all
	 * relations, so we don't need to add individual ones when it is
	 * present.
	 */
	ProcessMessageList(hdr->rclist,
					   if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
						   (msg->rc.relId == relId ||
							msg->rc.relId == InvalidOid))
					   return);

	/* OK, add the item */
	msg.rc.id = SHAREDINVALRELCACHE_ID;
	msg.rc.dbId = dbId;
	msg.rc.relId = relId;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	AddInvalidationMessage(&hdr->rclist, &msg);
}

/*
 * Add a snapshot inval entry
 */
static void
AddSnapshotInvalidationMessage(InvalidationListHeader *hdr,
							   Oid dbId, Oid relId)
{
	SharedInvalidationMessage msg;

	/* Don't add a duplicate item */
	/* We assume dbId need not be checked because it will never change */
	ProcessMessageList(hdr->rclist,
					   if (msg->sn.id == SHAREDINVALSNAPSHOT_ID &&
						   msg->sn.relId == relId)
					   return);

	/* OK, add the item */
	msg.sn.id = SHAREDINVALSNAPSHOT_ID;
	msg.sn.dbId = dbId;
	msg.sn.relId = relId;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	AddInvalidationMessage(&hdr->rclist, &msg);
}

/*
 * Append one list of invalidation messages to another, resetting
 * the source list to empty.
 */
static void
AppendInvalidationMessages(InvalidationListHeader *dest,
						   InvalidationListHeader *src)
{
	AppendInvalidationMessageList(&dest->cclist, &src->cclist);
	AppendInvalidationMessageList(&dest->rclist, &src->rclist);
}

/*
 * Execute the given function for all the messages in an invalidation list.
 * The list is not altered.
 *
 * catcache entries are processed first, for reasons mentioned above.
 */
static void
ProcessInvalidationMessages(InvalidationListHeader *hdr,
							void (*func) (SharedInvalidationMessage *msg))
{
	ProcessMessageList(hdr->cclist, func(msg));
	ProcessMessageList(hdr->rclist, func(msg));
}

/*
 * As above, but the function is able to process an array of messages
 * rather than just one at a time.
 */
static void
ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr,
								 void (*func) (const SharedInvalidationMessage *msgs, int n))
{
	ProcessMessageListMulti(hdr->cclist, func(msgs, n));
	ProcessMessageListMulti(hdr->rclist, func(msgs, n));
}

/* ----------------------------------------------------------------
 *					  private support functions
 * ----------------------------------------------------------------
 */

/*
 * RegisterCatcacheInvalidation
 *
 * Register an invalidation event for a catcache tuple entry.
 */
static void
RegisterCatcacheInvalidation(int cacheId,
							 uint32 hashValue,
							 Oid dbId)
{
	AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
								   cacheId, hashValue, dbId);
}

/*
 * RegisterCatalogInvalidation
 *
 * Register an invalidation event for all catcache entries from a catalog.
 */
static void
RegisterCatalogInvalidation(Oid dbId, Oid catId)
{
	AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
								  dbId, catId);
}

/*
 * RegisterRelcacheInvalidation
 *
 * As above, but register a relcache invalidation event.
 */
static void
RegisterRelcacheInvalidation(Oid dbId, Oid relId)
{
	AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
								   dbId, relId);

	/*
	 * Most of the time, relcache invalidation is associated with system
	 * catalog updates, but there are a few cases where it isn't.  Quick
	 * hack to ensure that the next CommandCounterIncrement() will think
	 * that we need to do CommandEndInvalidationMessages().
	 */
	(void) GetCurrentCommandId(true);

	/*
	 * If the relation being invalidated is one of those cached in the local
	 * relcache init file, mark that we need to zap that file at commit.
	 * The same is true when we are invalidating the whole relcache.
	 */
	if (OidIsValid(dbId) &&
		(RelationIdIsInInitFile(relId) || relId == InvalidOid))
		transInvalInfo->RelcacheInitFileInval = true;
}

/*
 * RegisterSnapshotInvalidation
 *
 * Register an invalidation event for MVCC scans against a given catalog.
 * Only needed for catalogs that don't have catcaches.
 */
static void
RegisterSnapshotInvalidation(Oid dbId, Oid relId)
{
	AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
								   dbId, relId);
}

/*
 * LocalExecuteInvalidationMessage
 *
 * Process a single invalidation message (which could be of any type).
 * Only the local caches are flushed; this does not transmit the message
 * to other backends.
 */
void
LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
{
	if (msg->id >= 0)
	{
		if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
		{
			InvalidateCatalogSnapshot();

			SysCacheInvalidate(msg->cc.id, msg->cc.hashValue);

			CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
		}
	}
	else if (msg->id == SHAREDINVALCATALOG_ID)
	{
		if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
		{
			InvalidateCatalogSnapshot();

			CatalogCacheFlushCatalog(msg->cat.catId);

			/* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
		}
	}
	else if (msg->id == SHAREDINVALRELCACHE_ID)
	{
		if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
		{
			int			i;

			if (msg->rc.relId == InvalidOid)
				RelationCacheInvalidate();
			else
				RelationCacheInvalidateEntry(msg->rc.relId);

			for (i = 0; i < relcache_callback_count; i++)
			{
				struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;

				(*ccitem->function) (ccitem->arg, msg->rc.relId);
			}
		}
	}
	else if (msg->id == SHAREDINVALSMGR_ID)
	{
		/*
		 * We could have smgr entries for relations of other databases, so no
		 * short-circuit test is possible here.
		 */
		RelFileNodeBackend rnode;

		rnode.node = msg->sm.rnode;
		rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
		smgrclosenode(rnode);
	}
	else if (msg->id == SHAREDINVALRELMAP_ID)
	{
		/* We only care about our own database and shared catalogs */
		if (msg->rm.dbId == InvalidOid)
			RelationMapInvalidate(true);
		else if (msg->rm.dbId == MyDatabaseId)
			RelationMapInvalidate(false);
	}
	else if (msg->id == SHAREDINVALSNAPSHOT_ID)
	{
		/* We only care about our own database and shared catalogs */
		if (msg->sn.dbId == InvalidOid)
			InvalidateCatalogSnapshot();
		else if (msg->sn.dbId == MyDatabaseId)
			InvalidateCatalogSnapshot();
	}
	else
		elog(FATAL, "unrecognized SI message ID: %d", msg->id);
}

/*
 * InvalidateSystemCaches
 *
 *	This blows away all tuples in the system catalog caches and
 *	all the cached relation descriptors and smgr cache entries.
 *	Relation descriptors that have positive refcounts are then rebuilt.
 *
 *	We call this when we see a shared-inval-queue overflow signal,
 *	since that tells us we've lost some shared-inval messages and hence
 *	don't know what needs to be invalidated.
 */
void
InvalidateSystemCaches(void)
{
	int			i;

	InvalidateCatalogSnapshot();
	ResetCatalogCaches();
	RelationCacheInvalidate();	/* gets smgr and relmap too */

	for (i = 0; i < syscache_callback_count; i++)
	{
		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;

		(*ccitem->function) (ccitem->arg, ccitem->id, 0);
	}

	for (i = 0; i < relcache_callback_count; i++)
	{
		struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;

		(*ccitem->function) (ccitem->arg, InvalidOid);
	}
}


/* ----------------------------------------------------------------
 *					  public functions
 * ----------------------------------------------------------------
 */

/*
 * AcceptInvalidationMessages
 *		Read and process invalidation messages from the shared invalidation
 *		message queue.
 *
 * Note:
 *		This should be called as the first step in processing a transaction.
 */
void
AcceptInvalidationMessages(void)
{
	ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
								 InvalidateSystemCaches);

	/*
	 * Test code to force cache flushes anytime a flush could happen.
	 *
	 * If used with CLOBBER_FREED_MEMORY, CLOBBER_CACHE_ALWAYS provides a
	 * fairly thorough test that the system contains no cache-flush hazards.
	 * However, it also makes the system unbelievably slow --- the regression
	 * tests take about 100 times longer than normal.
	 *
	 * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY.
	 * This slows things by at least a factor of 10000, so I wouldn't suggest
	 * trying to run the entire regression tests that way.  It's useful to
	 * try a few simple tests, to make sure that cache reload isn't subject
	 * to internal cache-flush hazards, but after you've done a few thousand
	 * recursive reloads it's unlikely you'll learn more.
	 */
#if defined(CLOBBER_CACHE_ALWAYS)
	{
		static bool in_recursion = false;

		if (!in_recursion)
		{
			in_recursion = true;
			InvalidateSystemCaches();
			in_recursion = false;
		}
	}
#elif defined(CLOBBER_CACHE_RECURSIVELY)
	InvalidateSystemCaches();
#endif
}

/*
 * PrepareInvalidationState
 *		Initialize inval lists for the current (sub)transaction.
 */
static void
PrepareInvalidationState(void)
{
	TransInvalidationInfo *myInfo;

	if (transInvalInfo != NULL &&
		transInvalInfo->my_level == GetCurrentTransactionNestLevel())
		return;

	myInfo = (TransInvalidationInfo *)
		MemoryContextAllocZero(TopTransactionContext,
							   sizeof(TransInvalidationInfo));
	myInfo->parent = transInvalInfo;
	myInfo->my_level = GetCurrentTransactionNestLevel();

	/*
	 * If there's any previous entry, this one should be for a deeper nesting
	 * level.
	 */
	Assert(transInvalInfo == NULL ||
		   myInfo->my_level > transInvalInfo->my_level);

	transInvalInfo = myInfo;
}

/*
 * PostPrepare_Inval
 *		Clean up after successful PREPARE.
 *
 * Here, we want to act as though the transaction aborted, so that we will
 * undo any syscache changes it made, thereby bringing us into sync with the
 * outside world, which doesn't believe the transaction committed yet.
 *
 * If the prepared transaction is later aborted, there is nothing more to
 * do; if it commits, we will receive the consequent inval messages just
 * like everyone else.
 */
void
PostPrepare_Inval(void)
{
	AtEOXact_Inval(false);
}

/*
 * Collect invalidation messages into SharedInvalidMessagesArray.
 */
static void
MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
{
	/*
	 * Initialise the array the first time through in each commit.
	 */
	if (SharedInvalidMessagesArray == NULL)
	{
		maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE;
		numSharedInvalidMessagesArray = 0;

		/*
		 * Although this is being palloc'd, we don't actually free it
		 * directly.  We're so close to EOXact that we know we're going to
		 * lose it anyhow.
		 */
		SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
											* sizeof(SharedInvalidationMessage));
	}

	if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
	{
		while ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
			maxSharedInvalidMessagesArray *= 2;

		SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
											  maxSharedInvalidMessagesArray
											  * sizeof(SharedInvalidationMessage));
	}

	/*
	 * Append the next chunk onto the array.
	 */
	memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
		   msgs, n * sizeof(SharedInvalidationMessage));
	numSharedInvalidMessagesArray += n;
}

/*
 * xactGetCommittedInvalidationMessages() is executed by
 * RecordTransactionCommit() to add invalidation messages onto the
 * commit record.  This applies only to commit message types, never to
 * abort records.  Must always run before AtEOXact_Inval(), since that
 * removes the data we need to see.
 *
 * Remember that this runs before we have officially committed, so we
 * must not do anything here to change what might occur *if* we should
 * fail between here and the actual commit.
 *
 * see also xact_redo_commit() and xact_desc_commit()
 */
int
xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
									 bool *RelcacheInitFileInval)
{
	MemoryContext oldcontext;

	/* Quick exit if we haven't done anything with invalidation messages. */
	if (transInvalInfo == NULL)
	{
		*RelcacheInitFileInval = false;
		*msgs = NULL;
		return 0;
	}

	/* Must be at top of stack */
	Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);

	/*
	 * Relcache init file invalidation requires processing both before and
	 * after we send the SI messages.  However, we need not do anything
	 * unless we committed.
	 */
	*RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;

	/*
	 * Walk through TransInvalidationInfo to collect all the messages into a
	 * single contiguous array of invalidation messages.  It must be
	 * contiguous so we can copy it directly into the WAL message.  Maintain
	 * the order that they would be processed in by AtEOXact_Inval(), to
	 * ensure emulated behaviour in redo is as similar as possible to the
	 * original.  We want the same bugs, if any, not new ones.
	 */
	oldcontext = MemoryContextSwitchTo(CurTransactionContext);

	ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
									 MakeSharedInvalidMessagesArray);
	ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
									 MakeSharedInvalidMessagesArray);
	MemoryContextSwitchTo(oldcontext);

	Assert(!(numSharedInvalidMessagesArray > 0 &&
			 SharedInvalidMessagesArray == NULL));

	*msgs = SharedInvalidMessagesArray;

	return numSharedInvalidMessagesArray;
}

/*
 * ProcessCommittedInvalidationMessages is executed by xact_redo_commit() or
 * standby_redo() to process invalidation messages.  Currently that happens
 * only at end-of-xact.
 *
 * Relcache init file invalidation requires processing both
 * before and after we send the SI messages.  See AtEOXact_Inval().
 */
void
ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
									 int nmsgs, bool RelcacheInitFileInval,
									 Oid dbid, Oid tsid)
{
	if (nmsgs <= 0)
		return;

	elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
		 (RelcacheInitFileInval ? " and relcache file invalidation" : ""));

	if (RelcacheInitFileInval)
	{
		/*
		 * RelationCacheInitFilePreInvalidate requires DatabasePath to be
		 * set, but we should not use SetDatabasePath during recovery, since
		 * it is intended to be used only once by normal backends.  Hence, a
		 * quick hack: set DatabasePath directly, then unset it after use.
		 */
		DatabasePath = GetDatabasePath(dbid, tsid);
		elog(trace_recovery(DEBUG4), "removing relcache init file in \"%s\"",
			 DatabasePath);
		RelationCacheInitFilePreInvalidate();
		pfree(DatabasePath);
		DatabasePath = NULL;
	}

	SendSharedInvalidMessages(msgs, nmsgs);

	if (RelcacheInitFileInval)
		RelationCacheInitFilePostInvalidate();
}

/*
 * AtEOXact_Inval
 *		Process queued-up invalidation messages at end of main transaction.
 *
 * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
 * to the shared invalidation message queue.  Note that these will be read
 * not only by other backends, but also by our own backend at the next
 * transaction start (via AcceptInvalidationMessages).  This means that
 * we can skip immediate local processing of anything that's still in
 * CurrentCmdInvalidMsgs, and just send that list out too.
 *
 * If not isCommit, we are aborting, and must locally process the messages
 * in PriorCmdInvalidMsgs.  No messages need be sent to other backends,
 * since they'll not have seen our changed tuples anyway.  We can forget
 * about CurrentCmdInvalidMsgs too, since those changes haven't touched
 * the caches yet.
 *
 * In any case, reset the various lists to empty.  We need not physically
 * free memory here, since TopTransactionContext is about to be emptied
 * anyway.
 *
 * Note:
 *		This should be called as the last step in processing a transaction.
 */
void
AtEOXact_Inval(bool isCommit)
{
	/* Quick exit if no messages */
	if (transInvalInfo == NULL)
		return;

	/* Must be at top of stack */
	Assert(transInvalInfo->my_level == 1 && transInvalInfo->parent == NULL);

	if (isCommit)
	{
		/*
		 * Relcache init file invalidation requires processing both before
		 * and after we send the SI messages.  However, we need not do
		 * anything unless we committed.
		 */
		if (transInvalInfo->RelcacheInitFileInval)
			RelationCacheInitFilePreInvalidate();

		AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
								   &transInvalInfo->CurrentCmdInvalidMsgs);

		ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
										 SendSharedInvalidMessages);

		if (transInvalInfo->RelcacheInitFileInval)
			RelationCacheInitFilePostInvalidate();
	}
	else
	{
		ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
									LocalExecuteInvalidationMessage);
	}

	/* Need not free anything explicitly */
	transInvalInfo = NULL;
	SharedInvalidMessagesArray = NULL;
	numSharedInvalidMessagesArray = 0;
}

/*
 * AtEOSubXact_Inval
 *		Process queued-up invalidation messages at end of subtransaction.
 *
 * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
 * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
 * parent's PriorCmdInvalidMsgs list.
 *
 * If not isCommit, we are aborting, and must locally process the messages
 * in PriorCmdInvalidMsgs.  No messages need be sent to other backends.
 * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
 * touched the caches yet.
 *
 * In any case, pop the transaction stack.  We need not physically free memory
 * here, since CurTransactionContext is about to be emptied anyway
 * (if aborting).  Beware of the possibility of aborting the same nesting
 * level twice, though.
 */
void
AtEOSubXact_Inval(bool isCommit)
{
	int			my_level;
	TransInvalidationInfo *myInfo = transInvalInfo;

	/* Quick exit if no messages. */
	if (myInfo == NULL)
		return;

	/* Also bail out quickly if messages are not for this level. */
	my_level = GetCurrentTransactionNestLevel();
	if (myInfo->my_level != my_level)
	{
		Assert(myInfo->my_level < my_level);
		return;
	}

	if (isCommit)
	{
		/* If CurrentCmdInvalidMsgs still has anything, fix it */
		CommandEndInvalidationMessages();

		/*
		 * We create invalidation stack entries lazily, so the parent might
		 * not have one.  Instead of creating one, moving all the data over,
		 * and then freeing our own, we can just adjust the level of our own
		 * entry.
		 */
		if (myInfo->parent == NULL || myInfo->parent->my_level < my_level - 1)
		{
			myInfo->my_level--;
			return;
		}

		/* Pass up my inval messages to parent */
		AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
								   &myInfo->PriorCmdInvalidMsgs);

		/* Pending relcache inval becomes parent's problem too */
		if (myInfo->RelcacheInitFileInval)
			myInfo->parent->RelcacheInitFileInval = true;

		/* Pop the transaction state stack */
		transInvalInfo = myInfo->parent;

		/* Need not free anything else explicitly */
		pfree(myInfo);
	}
	else
	{
		ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
									LocalExecuteInvalidationMessage);

		/* Pop the transaction state stack */
		transInvalInfo = myInfo->parent;

		/* Need not free anything else explicitly */
		pfree(myInfo);
	}
}

/*
 * CommandEndInvalidationMessages
 *		Process queued-up invalidation messages at end of one command
 *		in a transaction.
 *
 * Here, we send no messages to the shared queue, since we don't know yet if
 * we will commit.  We do need to locally process the CurrentCmdInvalidMsgs
 * list, so as to flush our caches of any entries we have outdated in the
 * current command.  We then move the current-cmd list over to become part
 * of the prior-cmds list.
 *
 * Note:
 *		This should be called during CommandCounterIncrement(),
 *		after we have advanced the command ID.
 */
void
CommandEndInvalidationMessages(void)
{
	/*
	 * You might think this shouldn't be called outside any transaction, but
	 * bootstrap does it, and so does ABORT issued when not in a transaction.
	 * So just quietly return if there is no state to work on.
	 */
	if (transInvalInfo == NULL)
		return;

	ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
								LocalExecuteInvalidationMessage);
	AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
							   &transInvalInfo->CurrentCmdInvalidMsgs);
}


/*
 * CacheInvalidateHeapTuple
 *		Register the given tuple for invalidation at end of command
 *		(ie, current command is creating or outdating this tuple).
 *		Also, detect whether a relcache invalidation is implied.
 *
 * For an insert or delete, tuple is the target tuple and newtuple is NULL.
 * For an update, we are called just once, with tuple being the old tuple
 * version and newtuple the new version.  This allows avoidance of duplicate
 * effort during an update.
 */
void
CacheInvalidateHeapTuple(Relation relation,
						 HeapTuple tuple,
						 HeapTuple newtuple)
{
	Oid			tupleRelId;
	Oid			databaseId;
	Oid			relationId;

	/* Do nothing during bootstrap */
	if (IsBootstrapProcessingMode())
		return;

	/*
	 * We only need to worry about invalidation for tuples that are in system
	 * catalogs; user-relation tuples are never in catcaches and can't affect
	 * the relcache either.
	 */
	if (!IsCatalogRelation(relation))
		return;

	/*
	 * IsCatalogRelation() will return true for TOAST tables of system
	 * catalogs, but we don't care about those, either.
	 */
	if (IsToastRelation(relation))
		return;

	/*
	 * If we're not prepared to queue invalidation messages for this
	 * subtransaction level, get ready now.
	 */
	PrepareInvalidationState();

	/*
	 * First let the catcache do its thing
	 */
	tupleRelId = RelationGetRelid(relation);
	if (RelationInvalidatesSnapshotsOnly(tupleRelId))
	{
		databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
		RegisterSnapshotInvalidation(databaseId, tupleRelId);
	}
	else
		PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
									  RegisterCatcacheInvalidation);

	/*
	 * Now, is this tuple one of the primary definers of a relcache entry?
	 * See comments in the file header for a deeper explanation.
	 *
	 * Note we ignore newtuple here; we assume an update cannot move a tuple
	 * from being part of one relcache entry to being part of another.
	 */
	if (tupleRelId == RelationRelationId)
	{
		Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);

		relationId = HeapTupleGetOid(tuple);
		if (classtup->relisshared)
			databaseId = InvalidOid;
		else
			databaseId = MyDatabaseId;
	}
	else if (tupleRelId == AttributeRelationId)
	{
		Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);

		relationId = atttup->attrelid;

		/*
		 * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
		 * even if the rel in question is shared (which we can't easily
		 * tell).  This essentially means that only backends in this same
		 * database will react to the relcache flush request.  This is in
		 * fact appropriate, since only those backends could see our
		 * pg_attribute change anyway.  It looks a bit ugly though.  (In
		 * practice, shared relations can't have schema changes after
		 * bootstrap, so we should never come here for a shared rel anyway.)
		 */
		databaseId = MyDatabaseId;
	}
	else if (tupleRelId == IndexRelationId)
	{
		Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);

		/*
		 * When a pg_index row is updated, we should send out a relcache
		 * inval for the index relation.  As above, we don't know the shared
		 * status of the index, but in practice it doesn't matter since
		 * indexes of shared catalogs can't have such updates.
		 */
		relationId = indextup->indexrelid;
		databaseId = MyDatabaseId;
	}
	else
		return;

	/*
	 * Yes.  We need to register a relcache invalidation event.
	 */
	RegisterRelcacheInvalidation(databaseId, relationId);
}
1196 :
1197 : /*
1198 : * CacheInvalidateCatalog
1199 : * Register invalidation of the whole content of a system catalog.
1200 : *
1201 : * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
1202 : * changed any tuples as moved them around. Some uses of catcache entries
1203 : * expect their TIDs to be correct, so we have to blow away the entries.
1204 : *
1205 : * Note: we expect caller to verify that the rel actually is a system
1206 : * catalog. If it isn't, no great harm is done, just a wasted sinval message.
1207 : */
1208 : void
1209 3 : CacheInvalidateCatalog(Oid catalogId)
1210 : {
1211 : Oid databaseId;
1212 :
1213 3 : PrepareInvalidationState();
1214 :
1215 3 : if (IsSharedRelation(catalogId))
1216 1 : databaseId = InvalidOid;
1217 : else
1218 2 : databaseId = MyDatabaseId;
1219 :
1220 3 : RegisterCatalogInvalidation(databaseId, catalogId);
1221 3 : }
1222 :
1223 : /*
1224 : * CacheInvalidateRelcache
1225 : * Register invalidation of the specified relation's relcache entry
1226 : * at end of command.
1227 : *
1228 : * This is used in places that need to force relcache rebuild but aren't
1229 : * changing any of the tuples recognized as contributors to the relcache
1230 : * entry by CacheInvalidateHeapTuple. (An example is dropping an index.)
1231 : */
1232 : void
1233 2310 : CacheInvalidateRelcache(Relation relation)
1234 : {
1235 : Oid databaseId;
1236 : Oid relationId;
1237 :
1238 2310 : PrepareInvalidationState();
1239 :
1240 2310 : relationId = RelationGetRelid(relation);
1241 2310 : if (relation->rd_rel->relisshared)
1242 0 : databaseId = InvalidOid;
1243 : else
1244 2310 : databaseId = MyDatabaseId;
1245 :
1246 2310 : RegisterRelcacheInvalidation(databaseId, relationId);
1247 2310 : }
1248 :
1249 : /*
1250 : * CacheInvalidateRelcacheAll
1251 : * Register invalidation of the whole relcache at the end of command.
1252 : *
1253 : * This is used by alter publication as changes in publications may affect
1254 : * large number of tables.
1255 : */
1256 : void
1257 1 : CacheInvalidateRelcacheAll(void)
1258 : {
1259 1 : PrepareInvalidationState();
1260 :
1261 1 : RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
1262 1 : }

/*
 * CacheInvalidateRelcacheByTuple
 *		As above, but the relation is identified by passing its pg_class
 *		tuple.
 */
void
CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
{
	Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
	Oid			databaseId;
	Oid			relationId;

	PrepareInvalidationState();

	relationId = HeapTupleGetOid(classTuple);
	if (classtup->relisshared)
		databaseId = InvalidOid;
	else
		databaseId = MyDatabaseId;
	RegisterRelcacheInvalidation(databaseId, relationId);
}

/*
 * CacheInvalidateRelcacheByRelid
 *		As above, but the relation is identified by passing its OID.
 *		This is the least efficient of the three options; use one of
 *		the above routines if you have a Relation or pg_class tuple.
 */
void
CacheInvalidateRelcacheByRelid(Oid relid)
{
	HeapTuple	tup;

	PrepareInvalidationState();

	tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for relation %u", relid);
	CacheInvalidateRelcacheByTuple(tup);
	ReleaseSysCache(tup);
}


/*
 * CacheInvalidateSmgr
 *		Register invalidation of smgr references to a physical relation.
 *
 * Sending this type of invalidation msg forces other backends to close open
 * smgr entries for the rel.  This should be done to flush dangling open-file
 * references when the physical rel is being dropped or truncated.  Because
 * these are nontransactional (i.e., not-rollback-able) operations, we just
 * send the inval message immediately without any queuing.
 *
 * Note: in most cases there will have been a relcache flush issued against
 * the rel at the logical level.  We need a separate smgr-level flush because
 * it is possible for backends to have open smgr entries for rels they don't
 * have a relcache entry for, e.g. because the only thing they ever did with
 * the rel is write out dirty shared buffers.
 *
 * Note: because these messages are nontransactional, they won't be captured
 * in commit/abort WAL entries.  Instead, calls to CacheInvalidateSmgr()
 * should happen in low-level smgr.c routines, which are executed while
 * replaying WAL as well as when creating it.
 *
 * Note: In order to avoid bloating SharedInvalidationMessage, we store only
 * three bytes of the backend ID using what would otherwise be padding space.
 * Thus, the maximum possible backend ID is 2^23-1.
 */
void
CacheInvalidateSmgr(RelFileNodeBackend rnode)
{
	SharedInvalidationMessage msg;

	msg.sm.id = SHAREDINVALSMGR_ID;
	msg.sm.backend_hi = rnode.backend >> 16;
	msg.sm.backend_lo = rnode.backend & 0xffff;
	msg.sm.rnode = rnode.node;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	SendSharedInvalidMessages(&msg, 1);
}
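
/*
 * A worked sketch of that three-byte round trip, assuming (as stated above)
 * that backend IDs fit in 23 bits.  The encoding above splits rnode.backend
 * across the hi/lo fields, and LocalExecuteInvalidationMessage() reassembles
 * it:
 *
 *		msg.sm.backend_hi = rnode.backend >> 16;
 *		msg.sm.backend_lo = rnode.backend & 0xffff;
 *		...
 *		rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
 *
 * For example, backend ID 0x12345 encodes as hi = 0x1 and lo = 0x2345, and
 * decodes back to 0x12345.
 */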

/*
 * CacheInvalidateRelmap
 *		Register invalidation of the relation mapping for a database,
 *		or for the shared catalogs if databaseId is zero.
 *
 * Sending this type of invalidation msg forces other backends to re-read
 * the indicated relation mapping file.  It is also necessary to send a
 * relcache inval for the specific relations whose mapping has been altered,
 * else the relcache won't get updated with the new filenode data.
 *
 * Note: because these messages are nontransactional, they won't be captured
 * in commit/abort WAL entries.  Instead, calls to CacheInvalidateRelmap()
 * should happen in low-level relmapper.c routines, which are executed while
 * replaying WAL as well as when creating it.
 */
void
CacheInvalidateRelmap(Oid databaseId)
{
	SharedInvalidationMessage msg;

	msg.rm.id = SHAREDINVALRELMAP_ID;
	msg.rm.dbId = databaseId;
	/* check AddCatcacheInvalidationMessage() for an explanation */
	VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));

	SendSharedInvalidMessages(&msg, 1);
}


/*
 * CacheRegisterSyscacheCallback
 *		Register the specified function to be called for all future
 *		invalidation events in the specified cache.  The cache ID and the
 *		hash value of the tuple being invalidated will be passed to the
 *		function.
 *
 * NOTE: Hash value zero will be passed if a cache reset request is received.
 * In this case the called routines should flush all cached state.
 * Yes, there's a possibility of a false match to zero, but it doesn't seem
 * worth troubling over, especially since most of the current callees just
 * flush all cached state anyway.
 */
void
CacheRegisterSyscacheCallback(int cacheid,
							  SyscacheCallbackFunction func,
							  Datum arg)
{
	if (cacheid < 0 || cacheid >= SysCacheSize)
		elog(FATAL, "invalid cache ID: %d", cacheid);
	if (syscache_callback_count >= MAX_SYSCACHE_CALLBACKS)
		elog(FATAL, "out of syscache_callback_list slots");

	if (syscache_callback_links[cacheid] == 0)
	{
		/* first callback for this cache */
		syscache_callback_links[cacheid] = syscache_callback_count + 1;
	}
	else
	{
		/* add to end of chain, so that older callbacks are called first */
		int			i = syscache_callback_links[cacheid] - 1;

		while (syscache_callback_list[i].link > 0)
			i = syscache_callback_list[i].link - 1;
		syscache_callback_list[i].link = syscache_callback_count + 1;
	}

	syscache_callback_list[syscache_callback_count].id = cacheid;
	syscache_callback_list[syscache_callback_count].link = 0;
	syscache_callback_list[syscache_callback_count].function = func;
	syscache_callback_list[syscache_callback_count].arg = arg;

	++syscache_callback_count;
}
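
/*
 * A hedged usage sketch: "my_proc_callback" is a hypothetical caller (not
 * defined in this file), though PROCOID is a real syscache ID and the
 * signature matches SyscacheCallbackFunction.  A hashvalue of zero means
 * "flush everything", per the NOTE above:
 *
 *		static void
 *		my_proc_callback(Datum arg, int cacheid, uint32 hashvalue)
 *		{
 *			invalidate_my_proc_cache(hashvalue);	-- hypothetical helper
 *		}
 *
 *		CacheRegisterSyscacheCallback(PROCOID, my_proc_callback, (Datum) 0);
 */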

/*
 * CacheRegisterRelcacheCallback
 *		Register the specified function to be called for all future
 *		relcache invalidation events.  The OID of the relation being
 *		invalidated will be passed to the function.
 *
 * NOTE: InvalidOid will be passed if a cache reset request is received.
 * In this case the called routines should flush all cached state.
 */
void
CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
							  Datum arg)
{
	if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
		elog(FATAL, "out of relcache_callback_list slots");

	relcache_callback_list[relcache_callback_count].function = func;
	relcache_callback_list[relcache_callback_count].arg = arg;

	++relcache_callback_count;
}
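
/*
 * Similarly, a hypothetical relcache registration ("my_rel_callback" and
 * its helper are illustrative placeholders); the callback receives the
 * invalidated relation's OID, or InvalidOid for "flush everything":
 *
 *		static void
 *		my_rel_callback(Datum arg, Oid relid)
 *		{
 *			drop_my_cached_state_for(relid);	-- hypothetical helper
 *		}
 *
 *		CacheRegisterRelcacheCallback(my_rel_callback, (Datum) 0);
 */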

/*
 * CallSyscacheCallbacks
 *
 * This is exported so that CatalogCacheFlushCatalog can call it, saving
 * this module from knowing which catcache IDs correspond to which catalogs.
 */
void
CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
{
	int			i;

	if (cacheid < 0 || cacheid >= SysCacheSize)
		elog(ERROR, "invalid cache ID: %d", cacheid);

	i = syscache_callback_links[cacheid] - 1;
	while (i >= 0)
	{
		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;

		Assert(ccitem->id == cacheid);
		(*ccitem->function) (ccitem->arg, cacheid, hashvalue);
		i = ccitem->link - 1;
	}
}