Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * lock.c
4 : * POSTGRES primary lock mechanism
5 : *
6 : * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/lmgr/lock.c
12 : *
13 : * NOTES
14 : * A lock table is a shared memory hash table. When
15 : * a process tries to acquire a lock of a type that conflicts
16 : * with existing locks, it is put to sleep using the routines
17 : * in storage/lmgr/proc.c.
18 : *
19 : * For the most part, this code should be invoked via lmgr.c
20 : * or another lock-management module, not directly.
21 : *
22 : * Interface:
23 : *
24 : * InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 : * LockAcquire(), LockRelease(), LockReleaseAll(),
26 : * LockCheckConflicts(), GrantLock()
27 : *
28 : *-------------------------------------------------------------------------
29 : */
30 : #include "postgres.h"
31 :
32 : #include <signal.h>
33 : #include <unistd.h>
34 :
35 : #include "access/transam.h"
36 : #include "access/twophase.h"
37 : #include "access/twophase_rmgr.h"
38 : #include "access/xact.h"
39 : #include "access/xlog.h"
40 : #include "miscadmin.h"
41 : #include "pg_trace.h"
42 : #include "pgstat.h"
43 : #include "storage/proc.h"
44 : #include "storage/procarray.h"
45 : #include "storage/sinvaladt.h"
46 : #include "storage/spin.h"
47 : #include "storage/standby.h"
48 : #include "utils/memutils.h"
49 : #include "utils/ps_status.h"
50 : #include "utils/resowner_private.h"
51 :
52 :
53 : /* This configuration variable is used to set the lock table size */
54 : int max_locks_per_xact; /* set by guc.c */
55 :
56 : #define NLOCKENTS() \
57 : mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
58 :
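As a sizing illustration (the values below are hypothetical, not taken from this build), the shared lock-table sizing works out as follows:

    /*
     * With max_locks_per_xact = 64 (the GUC default), MaxBackends = 100 and
     * max_prepared_xacts = 2, NLOCKENTS() = 64 * (100 + 2) = 6528, which is
     * the max_size requested for the LOCK hash table in InitLocks() below
     * (and, doubled, for the PROCLOCK hash table).
     */
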
59 :
60 : /*
61 : * Data structures defining the semantics of the standard lock methods.
62 : *
63 : * The conflict table defines the semantics of the various lock modes.
64 : */
65 : static const LOCKMASK LockConflicts[] = {
66 : 0,
67 :
68 : /* AccessShareLock */
69 : LOCKBIT_ON(AccessExclusiveLock),
70 :
71 : /* RowShareLock */
72 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
73 :
74 : /* RowExclusiveLock */
75 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
76 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
77 :
78 : /* ShareUpdateExclusiveLock */
79 : LOCKBIT_ON(ShareUpdateExclusiveLock) |
80 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
81 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
82 :
83 : /* ShareLock */
84 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
85 : LOCKBIT_ON(ShareRowExclusiveLock) |
86 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
87 :
88 : /* ShareRowExclusiveLock */
89 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
90 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
91 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
92 :
93 : /* ExclusiveLock */
94 : LOCKBIT_ON(RowShareLock) |
95 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
96 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
97 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
98 :
99 : /* AccessExclusiveLock */
100 : LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
101 : LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
102 : LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
103 : LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
104 :
105 : };
106 :
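A minimal sketch of how the conflict table is read (the same bit test that DoLockModesConflict() performs further down):

    /* RowExclusiveLock conflicts with ShareLock, but not with itself */
    Assert(LockConflicts[RowExclusiveLock] & LOCKBIT_ON(ShareLock));
    Assert((LockConflicts[RowExclusiveLock] & LOCKBIT_ON(RowExclusiveLock)) == 0);
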
107 : /* Names of lock modes, for debug printouts */
108 : static const char *const lock_mode_names[] =
109 : {
110 : "INVALID",
111 : "AccessShareLock",
112 : "RowShareLock",
113 : "RowExclusiveLock",
114 : "ShareUpdateExclusiveLock",
115 : "ShareLock",
116 : "ShareRowExclusiveLock",
117 : "ExclusiveLock",
118 : "AccessExclusiveLock"
119 : };
120 :
121 : #ifndef LOCK_DEBUG
122 : static bool Dummy_trace = false;
123 : #endif
124 :
125 : static const LockMethodData default_lockmethod = {
126 : AccessExclusiveLock, /* highest valid lock mode number */
127 : LockConflicts,
128 : lock_mode_names,
129 : #ifdef LOCK_DEBUG
130 : &Trace_locks
131 : #else
132 : &Dummy_trace
133 : #endif
134 : };
135 :
136 : static const LockMethodData user_lockmethod = {
137 : AccessExclusiveLock, /* highest valid lock mode number */
138 : LockConflicts,
139 : lock_mode_names,
140 : #ifdef LOCK_DEBUG
141 : &Trace_userlocks
142 : #else
143 : &Dummy_trace
144 : #endif
145 : };
146 :
147 : /*
148 : * map from lock method id to the lock table data structures
149 : */
150 : static const LockMethod LockMethods[] = {
151 : NULL,
152 : &default_lockmethod,
153 : &user_lockmethod
154 : };
155 :
156 :
157 : /* Record that's written to 2PC state file when a lock is persisted */
158 : typedef struct TwoPhaseLockRecord
159 : {
160 : LOCKTAG locktag;
161 : LOCKMODE lockmode;
162 : } TwoPhaseLockRecord;
163 :
164 :
165 : /*
166 : * Count of the number of fast path lock slots we believe to be used. This
167 : * might be higher than the real number if another backend has transferred
168 : * our locks to the primary lock table, but it can never be lower than the
169 : * real value, since only we can acquire locks on our own behalf.
170 : */
171 : static int FastPathLocalUseCount = 0;
172 :
173 : /* Macros for manipulating proc->fpLockBits */
174 : #define FAST_PATH_BITS_PER_SLOT 3
175 : #define FAST_PATH_LOCKNUMBER_OFFSET 1
176 : #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
177 : #define FAST_PATH_GET_BITS(proc, n) \
178 : (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
179 : #define FAST_PATH_BIT_POSITION(n, l) \
180 : (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
181 : AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
182 : AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
183 : ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
184 : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
185 : (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
186 : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
187 : (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
188 : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
189 : ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
190 :
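A worked example of the bit layout encoded by the macros above (3 bits per fast-path slot):

    /*
     * Recording AccessShareLock (mode 1) in fast-path slot 2:
     *   FAST_PATH_BIT_POSITION(2, AccessShareLock) = (1 - 1) + 3 * 2 = 6,
     * so FAST_PATH_SET_LOCKMODE(MyProc, 2, AccessShareLock) sets bit 6 of
     * MyProc->fpLockBits, and FAST_PATH_GET_BITS(MyProc, 2) then returns 0x1.
     */
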
191 : /*
192 : * The fast-path lock mechanism is concerned only with relation locks on
193 : * unshared relations by backends bound to a database. The fast-path
194 : * mechanism exists mostly to accelerate acquisition and release of locks
195 : * that rarely conflict. Because ShareUpdateExclusiveLock is
196 : * self-conflicting, it can't use the fast-path mechanism; but it also does
197 : * not conflict with any of the locks that do, so we can ignore it completely.
198 : */
199 : #define EligibleForRelationFastPath(locktag, mode) \
200 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
201 : (locktag)->locktag_type == LOCKTAG_RELATION && \
202 : (locktag)->locktag_field1 == MyDatabaseId && \
203 : MyDatabaseId != InvalidOid && \
204 : (mode) < ShareUpdateExclusiveLock)
205 : #define ConflictsWithRelationFastPath(locktag, mode) \
206 : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
207 : (locktag)->locktag_type == LOCKTAG_RELATION && \
208 : (locktag)->locktag_field1 != InvalidOid && \
209 : (mode) > ShareUpdateExclusiveLock)
210 :
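To illustrate how the two macros partition the lock modes (assuming a plain user relation in the current database):

    /*
     *   AccessShareLock .. RowExclusiveLock  -> EligibleForRelationFastPath
     *   ShareUpdateExclusiveLock             -> neither macro matches
     *   ShareLock .. AccessExclusiveLock     -> ConflictsWithRelationFastPath
     */
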
211 : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
212 : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
213 : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
214 : const LOCKTAG *locktag, uint32 hashcode);
215 : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
216 :
217 : /*
218 : * To make the fast-path lock mechanism work, we must have some way of
219 : * preventing the use of the fast-path when a conflicting lock might be
220 : * present. We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS
221 : * partitions, and maintain an integer count of the number of "strong" lockers
222 : * in each partition. When any "strong" lockers are present (which is
223 : * hopefully not very often), the fast-path mechanism can't be used, and we
224 : * must fall back to the slower method of pushing matching locks directly
225 : * into the main lock tables.
226 : *
227 : * The deadlock detector does not know anything about the fast path mechanism,
228 : * so any locks that might be involved in a deadlock must be transferred from
229 : * the fast-path queues to the main lock table.
230 : */
231 :
232 : #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
233 : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
234 : (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
235 : #define FastPathStrongLockHashPartition(hashcode) \
236 : ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
237 :
238 : typedef struct
239 : {
240 : slock_t mutex;
241 : uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
242 : } FastPathStrongRelationLockData;
243 :
244 : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
245 :
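A sketch of the resulting protocol, where hashcode stands for the LOCKTAG's hash code:

    /*
     * uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
     *
     * A "strong" locker increments
     * FastPathStrongRelationLocks->count[fasthashcode] under the mutex (see
     * BeginStrongLockAcquire), while a would-be fast-path acquirer refuses
     * the fast path whenever it sees that counter nonzero (see
     * LockAcquireExtended).
     */
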
246 :
247 : /*
248 : * Pointers to hash tables containing lock state
249 : *
250 : * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
251 : * shared memory; LockMethodLocalHash is local to each backend.
252 : */
253 : static HTAB *LockMethodLockHash;
254 : static HTAB *LockMethodProcLockHash;
255 : static HTAB *LockMethodLocalHash;
256 :
257 :
258 : /* private state for error cleanup */
259 : static LOCALLOCK *StrongLockInProgress;
260 : static LOCALLOCK *awaitedLock;
261 : static ResourceOwner awaitedOwner;
262 :
263 :
264 : #ifdef LOCK_DEBUG
265 :
266 : /*------
267 : * The following configuration options are available for lock debugging:
268 : *
269 : * TRACE_LOCKS -- give a bunch of output about what's going on in this file
270 : * TRACE_USERLOCKS -- same but for user locks
271 : * TRACE_LOCK_OIDMIN -- do not trace locks for tables below this oid
272 : * (use to avoid output on system tables)
273 : * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
274 : * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
275 : *
276 : * Furthermore, in storage/lmgr/lwlock.c:
277 : * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
278 : *
279 : * Define LOCK_DEBUG at compile time to get all these enabled.
280 : * --------
281 : */
282 :
283 : int Trace_lock_oidmin = FirstNormalObjectId;
284 : bool Trace_locks = false;
285 : bool Trace_userlocks = false;
286 : int Trace_lock_table = 0;
287 : bool Debug_deadlocks = false;
288 :
289 :
290 : inline static bool
291 : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
292 : {
293 : return
294 : (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
295 : ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
296 : || (Trace_lock_table &&
297 : (tag->locktag_field2 == Trace_lock_table));
298 : }
299 :
300 :
301 : inline static void
302 : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
303 : {
304 : if (LOCK_DEBUG_ENABLED(&lock->tag))
305 : elog(LOG,
306 : "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
307 : "req(%d,%d,%d,%d,%d,%d,%d)=%d "
308 : "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
309 : where, lock,
310 : lock->tag.locktag_field1, lock->tag.locktag_field2,
311 : lock->tag.locktag_field3, lock->tag.locktag_field4,
312 : lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
313 : lock->grantMask,
314 : lock->requested[1], lock->requested[2], lock->requested[3],
315 : lock->requested[4], lock->requested[5], lock->requested[6],
316 : lock->requested[7], lock->nRequested,
317 : lock->granted[1], lock->granted[2], lock->granted[3],
318 : lock->granted[4], lock->granted[5], lock->granted[6],
319 : lock->granted[7], lock->nGranted,
320 : lock->waitProcs.size,
321 : LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
322 : }
323 :
324 :
325 : inline static void
326 : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
327 : {
328 : if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
329 : elog(LOG,
330 : "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
331 : where, proclockP, proclockP->tag.myLock,
332 : PROCLOCK_LOCKMETHOD(*(proclockP)),
333 : proclockP->tag.myProc, (int) proclockP->holdMask);
334 : }
335 : #else /* not LOCK_DEBUG */
336 :
337 : #define LOCK_PRINT(where, lock, type) ((void) 0)
338 : #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
339 : #endif /* not LOCK_DEBUG */
340 :
341 :
342 : static uint32 proclock_hash(const void *key, Size keysize);
343 : static void RemoveLocalLock(LOCALLOCK *locallock);
344 : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
345 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
346 : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
347 : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
348 : static void FinishStrongLockAcquire(void);
349 : static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
350 : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
351 : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
352 : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
353 : PROCLOCK *proclock, LockMethod lockMethodTable);
354 : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
355 : LockMethod lockMethodTable, uint32 hashcode,
356 : bool wakeupNeeded);
357 : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
358 : LOCKTAG *locktag, LOCKMODE lockmode,
359 : bool decrement_strong_lock_count);
360 : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
361 : BlockedProcsData *data);
362 :
363 :
364 : /*
365 : * InitLocks -- Initialize the lock manager's data structures.
366 : *
367 : * This is called from CreateSharedMemoryAndSemaphores(), which see for
368 : * more comments. In the normal postmaster case, the shared hash tables
369 : * are created here, as well as a locallock hash table that will remain
370 : * unused and empty in the postmaster itself. Backends inherit the pointers
371 : * to the shared tables via fork(), and also inherit an image of the locallock
372 : * hash table, which they proceed to use. In the EXEC_BACKEND case, each
373 : * backend re-executes this code to obtain pointers to the already existing
374 : * shared hash tables and to create its locallock hash table.
375 : */
376 : void
377 5 : InitLocks(void)
378 : {
379 : HASHCTL info;
380 : long init_table_size,
381 : max_table_size;
382 : bool found;
383 :
384 : /*
385 : * Compute init/max size to request for lock hashtables. Note these
386 : * calculations must agree with LockShmemSize!
387 : */
388 5 : max_table_size = NLOCKENTS();
389 5 : init_table_size = max_table_size / 2;
390 :
391 : /*
392 : * Allocate hash table for LOCK structs. This stores per-locked-object
393 : * information.
394 : */
395 5 : MemSet(&info, 0, sizeof(info));
396 5 : info.keysize = sizeof(LOCKTAG);
397 5 : info.entrysize = sizeof(LOCK);
398 5 : info.num_partitions = NUM_LOCK_PARTITIONS;
399 :
400 5 : LockMethodLockHash = ShmemInitHash("LOCK hash",
401 : init_table_size,
402 : max_table_size,
403 : &info,
404 : HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
405 :
406 : /* Assume an average of 2 holders per lock */
407 5 : max_table_size *= 2;
408 5 : init_table_size *= 2;
409 :
410 : /*
411 : * Allocate hash table for PROCLOCK structs. This stores
412 : * per-lock-per-holder information.
413 : */
414 5 : info.keysize = sizeof(PROCLOCKTAG);
415 5 : info.entrysize = sizeof(PROCLOCK);
416 5 : info.hash = proclock_hash;
417 5 : info.num_partitions = NUM_LOCK_PARTITIONS;
418 :
419 5 : LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
420 : init_table_size,
421 : max_table_size,
422 : &info,
423 : HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
424 :
425 : /*
426 : * Allocate fast-path structures.
427 : */
428 5 : FastPathStrongRelationLocks =
429 5 : ShmemInitStruct("Fast Path Strong Relation Lock Data",
430 : sizeof(FastPathStrongRelationLockData), &found);
431 5 : if (!found)
432 5 : SpinLockInit(&FastPathStrongRelationLocks->mutex);
433 :
434 : /*
435 : * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
436 : * counts and resource owner information.
437 : *
438 : * The non-shared table could already exist in this process (this occurs
439 : * when the postmaster is recreating shared memory after a backend crash).
440 : * If so, delete and recreate it. (We could simply leave it, since it
441 : * ought to be empty in the postmaster, but for safety let's zap it.)
442 : */
443 5 : if (LockMethodLocalHash)
444 0 : hash_destroy(LockMethodLocalHash);
445 :
446 5 : info.keysize = sizeof(LOCALLOCKTAG);
447 5 : info.entrysize = sizeof(LOCALLOCK);
448 :
449 5 : LockMethodLocalHash = hash_create("LOCALLOCK hash",
450 : 16,
451 : &info,
452 : HASH_ELEM | HASH_BLOBS);
453 5 : }
454 :
455 :
456 : /*
457 : * Fetch the lock method table associated with a given lock
458 : */
459 : LockMethod
460 0 : GetLocksMethodTable(const LOCK *lock)
461 : {
462 0 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
463 :
464 0 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
465 0 : return LockMethods[lockmethodid];
466 : }
467 :
468 : /*
469 : * Fetch the lock method table associated with a given locktag
470 : */
471 : LockMethod
472 0 : GetLockTagsMethodTable(const LOCKTAG *locktag)
473 : {
474 0 : LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
475 :
476 0 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
477 0 : return LockMethods[lockmethodid];
478 : }
479 :
480 :
481 : /*
482 : * Compute the hash code associated with a LOCKTAG.
483 : *
484 : * To avoid unnecessary recomputations of the hash code, we try to do this
485 : * just once per function, and then pass it around as needed. Aside from
486 : * passing the hashcode to hash_search_with_hash_value(), we can extract
487 : * the lock partition number from the hashcode.
488 : */
489 : uint32
490 913280 : LockTagHashCode(const LOCKTAG *locktag)
491 : {
492 913280 : return get_hash_value(LockMethodLockHash, (const void *) locktag);
493 : }
494 :
495 : /*
496 : * Compute the hash code associated with a PROCLOCKTAG.
497 : *
498 : * Because we want to use just one set of partition locks for both the
499 : * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
500 : * fall into the same partition number as their associated LOCKs.
501 : * dynahash.c expects the partition number to be the low-order bits of
502 : * the hash code, and therefore a PROCLOCKTAG's hash code must have the
503 : * same low-order bits as the associated LOCKTAG's hash code. We achieve
504 : * this with this specialized hash function.
505 : */
506 : static uint32
507 17 : proclock_hash(const void *key, Size keysize)
508 : {
509 17 : const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
510 : uint32 lockhash;
511 : Datum procptr;
512 :
513 17 : Assert(keysize == sizeof(PROCLOCKTAG));
514 :
515 : /* Look into the associated LOCK object, and compute its hash code */
516 17 : lockhash = LockTagHashCode(&proclocktag->myLock->tag);
517 :
518 : /*
519 : * To make the hash code also depend on the PGPROC, we xor the proc
520 : * struct's address into the hash code, left-shifted so that the
521 : * partition-number bits don't change. Since this is only a hash, we
522 : * don't care if we lose high-order bits of the address; use an
523 : * intermediate variable to suppress cast-pointer-to-int warnings.
524 : */
525 17 : procptr = PointerGetDatum(proclocktag->myProc);
526 17 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
527 :
528 17 : return lockhash;
529 : }
530 :
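A concrete illustration of why the shift preserves the partition, assuming the stock NUM_LOCK_PARTITIONS of 16:

    /*
     * With LOG2_NUM_LOCK_PARTITIONS = 4,
     *     lockhash ^= ((uint32) procptr) << 4;
     * cannot change bits 0-3 of lockhash, so the PROCLOCK lands in the same
     * partition as its associated LOCK.
     */
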
531 : /*
532 : * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
533 : * for its underlying LOCK.
534 : *
535 : * We use this just to avoid redundant calls of LockTagHashCode().
536 : */
537 : static inline uint32
538 180132 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
539 : {
540 180132 : uint32 lockhash = hashcode;
541 : Datum procptr;
542 :
543 : /*
544 : * This must match proclock_hash()!
545 : */
546 180132 : procptr = PointerGetDatum(proclocktag->myProc);
547 180132 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
548 :
549 180132 : return lockhash;
550 : }
551 :
552 : /*
553 : * Given two lock modes, return whether they would conflict.
554 : */
555 : bool
556 8614 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
557 : {
558 8614 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
559 :
560 8614 : if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
561 8614 : return true;
562 :
563 0 : return false;
564 : }
565 :
566 : /*
567 : * LockHasWaiters -- look up 'locktag' and check if releasing this
568 : * lock would wake up other processes waiting for it.
569 : */
570 : bool
571 0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
572 : {
573 0 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
574 : LockMethod lockMethodTable;
575 : LOCALLOCKTAG localtag;
576 : LOCALLOCK *locallock;
577 : LOCK *lock;
578 : PROCLOCK *proclock;
579 : LWLock *partitionLock;
580 0 : bool hasWaiters = false;
581 :
582 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
583 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
584 0 : lockMethodTable = LockMethods[lockmethodid];
585 0 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
586 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
587 :
588 : #ifdef LOCK_DEBUG
589 : if (LOCK_DEBUG_ENABLED(locktag))
590 : elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
591 : locktag->locktag_field1, locktag->locktag_field2,
592 : lockMethodTable->lockModeNames[lockmode]);
593 : #endif
594 :
595 : /*
596 : * Find the LOCALLOCK entry for this lock and lockmode
597 : */
598 0 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
599 0 : localtag.lock = *locktag;
600 0 : localtag.mode = lockmode;
601 :
602 0 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
603 : (void *) &localtag,
604 : HASH_FIND, NULL);
605 :
606 : /*
607 : * let the caller print its own error message, too. Do not ereport(ERROR).
608 : */
609 0 : if (!locallock || locallock->nLocks <= 0)
610 : {
611 0 : elog(WARNING, "you don't own a lock of type %s",
612 : lockMethodTable->lockModeNames[lockmode]);
613 0 : return false;
614 : }
615 :
616 : /*
617 : * Check the shared lock table.
618 : */
619 0 : partitionLock = LockHashPartitionLock(locallock->hashcode);
620 :
621 0 : LWLockAcquire(partitionLock, LW_SHARED);
622 :
623 : /*
624 : * We don't need to re-find the lock or proclock, since we kept their
625 : * addresses in the locallock table, and they couldn't have been removed
626 : * while we were holding a lock on them.
627 : */
628 0 : lock = locallock->lock;
629 : LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
630 0 : proclock = locallock->proclock;
631 : PROCLOCK_PRINT("LockHasWaiters: found", proclock);
632 :
633 : /*
634 : * Double-check that we are actually holding a lock of the type we want to
635 : * release.
636 : */
637 0 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
638 : {
639 : PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
640 0 : LWLockRelease(partitionLock);
641 0 : elog(WARNING, "you don't own a lock of type %s",
642 : lockMethodTable->lockModeNames[lockmode]);
643 0 : RemoveLocalLock(locallock);
644 0 : return false;
645 : }
646 :
647 : /*
648 : * Do the checking.
649 : */
650 0 : if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
651 0 : hasWaiters = true;
652 :
653 0 : LWLockRelease(partitionLock);
654 :
655 0 : return hasWaiters;
656 : }
657 :
658 : /*
659 : * LockAcquire -- Check for lock conflicts, sleep if conflict found,
660 : * set lock if/when no conflicts.
661 : *
662 : * Inputs:
663 : * locktag: unique identifier for the lockable object
664 : * lockmode: lock mode to acquire
665 : * sessionLock: if true, acquire lock for session not current transaction
666 : * dontWait: if true, don't wait to acquire lock
667 : *
668 : * Returns one of:
669 : * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
670 : * LOCKACQUIRE_OK lock successfully acquired
671 : * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
672 : *
673 : * In the normal case where dontWait=false and the caller doesn't need to
674 : * distinguish a freshly acquired lock from one already taken earlier in
675 : * this same transaction, there is no need to examine the return value.
676 : *
677 : * Side Effects: The lock is acquired and recorded in lock tables.
678 : *
679 : * NOTE: if we wait for the lock, there is no way to abort the wait
680 : * short of aborting the transaction.
681 : */
682 : LockAcquireResult
683 980547 : LockAcquire(const LOCKTAG *locktag,
684 : LOCKMODE lockmode,
685 : bool sessionLock,
686 : bool dontWait)
687 : {
688 980547 : return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait, true);
689 : }
690 :
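A minimal caller sketch, patterned loosely on ConditionalLockRelation() in lmgr.c (relid here is a hypothetical variable):

    LOCKTAG		tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
    if (LockAcquire(&tag, AccessShareLock, false, true) == LOCKACQUIRE_NOT_AVAIL)
        ereport(ERROR,
                (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                 errmsg("could not obtain lock on relation %u", relid)));
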
691 : /*
692 : * LockAcquireExtended - allows us to specify additional options
693 : *
694 : * reportMemoryError specifies whether a lock request that fills the
695 : * lock table should generate an ERROR or not. This allows a priority
696 : * caller to note that the lock table is full and then begin taking
697 : * extreme action to reduce the number of other lock holders before
698 : * retrying the action.
699 : */
700 : LockAcquireResult
701 980547 : LockAcquireExtended(const LOCKTAG *locktag,
702 : LOCKMODE lockmode,
703 : bool sessionLock,
704 : bool dontWait,
705 : bool reportMemoryError)
706 : {
707 980547 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
708 : LockMethod lockMethodTable;
709 : LOCALLOCKTAG localtag;
710 : LOCALLOCK *locallock;
711 : LOCK *lock;
712 : PROCLOCK *proclock;
713 : bool found;
714 : ResourceOwner owner;
715 : uint32 hashcode;
716 : LWLock *partitionLock;
717 : int status;
718 980547 : bool log_lock = false;
719 :
720 980547 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
721 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
722 980547 : lockMethodTable = LockMethods[lockmethodid];
723 980547 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
724 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
725 :
726 980547 : if (RecoveryInProgress() && !InRecovery &&
727 0 : (locktag->locktag_type == LOCKTAG_OBJECT ||
728 0 : locktag->locktag_type == LOCKTAG_RELATION) &&
729 : lockmode > RowExclusiveLock)
730 0 : ereport(ERROR,
731 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
732 : errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
733 : lockMethodTable->lockModeNames[lockmode]),
734 : errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
735 :
736 : #ifdef LOCK_DEBUG
737 : if (LOCK_DEBUG_ENABLED(locktag))
738 : elog(LOG, "LockAcquire: lock [%u,%u] %s",
739 : locktag->locktag_field1, locktag->locktag_field2,
740 : lockMethodTable->lockModeNames[lockmode]);
741 : #endif
742 :
743 : /* Identify owner for lock */
744 980547 : if (sessionLock)
745 444 : owner = NULL;
746 : else
747 980103 : owner = CurrentResourceOwner;
748 :
749 : /*
750 : * Find or create a LOCALLOCK entry for this lock and lockmode
751 : */
752 980547 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
753 980547 : localtag.lock = *locktag;
754 980547 : localtag.mode = lockmode;
755 :
756 980547 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
757 : (void *) &localtag,
758 : HASH_ENTER, &found);
759 :
760 : /*
761 : * if it's a new locallock object, initialize it
762 : */
763 980547 : if (!found)
764 : {
765 879100 : locallock->lock = NULL;
766 879100 : locallock->proclock = NULL;
767 879100 : locallock->hashcode = LockTagHashCode(&(localtag.lock));
768 879100 : locallock->nLocks = 0;
769 879100 : locallock->numLockOwners = 0;
770 879100 : locallock->maxLockOwners = 8;
771 879100 : locallock->holdsStrongLockCount = FALSE;
772 879100 : locallock->lockOwners = NULL; /* in case next line fails */
773 879100 : locallock->lockOwners = (LOCALLOCKOWNER *)
774 879100 : MemoryContextAlloc(TopMemoryContext,
775 879100 : locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
776 : }
777 : else
778 : {
779 : /* Make sure there will be room to remember the lock */
780 101447 : if (locallock->numLockOwners >= locallock->maxLockOwners)
781 : {
782 5 : int newsize = locallock->maxLockOwners * 2;
783 :
784 5 : locallock->lockOwners = (LOCALLOCKOWNER *)
785 5 : repalloc(locallock->lockOwners,
786 : newsize * sizeof(LOCALLOCKOWNER));
787 5 : locallock->maxLockOwners = newsize;
788 : }
789 : }
790 980547 : hashcode = locallock->hashcode;
791 :
792 : /*
793 : * If we already hold the lock, we can just increase the count locally.
794 : */
795 980547 : if (locallock->nLocks > 0)
796 : {
797 101447 : GrantLockLocal(locallock, owner);
798 101447 : return LOCKACQUIRE_ALREADY_HELD;
799 : }
800 :
801 : /*
802 : * Prepare to emit a WAL record if acquisition of this lock needs to be
803 : * replayed in a standby server.
804 : *
805 : * Here we prepare to log; after the lock is acquired we'll issue the log record.
806 : * This arrangement simplifies error recovery in case the preparation step
807 : * fails.
808 : *
809 : * Only AccessExclusiveLocks can conflict with lock types that read-only
810 : * transactions can acquire in a standby server. Make sure this definition
811 : * matches the one in GetRunningTransactionLocks().
812 : */
813 894480 : if (lockmode >= AccessExclusiveLock &&
814 24207 : locktag->locktag_type == LOCKTAG_RELATION &&
815 17654 : !RecoveryInProgress() &&
816 8827 : XLogStandbyInfoActive())
817 : {
818 8827 : LogAccessExclusiveLockPrepare();
819 8827 : log_lock = true;
820 : }
821 :
822 : /*
823 : * Attempt to take lock via fast path, if eligible. But if we remember
824 : * having filled up the fast path array, we don't attempt to make any
825 : * further use of it until we release some locks. It's possible that some
826 : * other backend has transferred some of those locks to the shared hash
827 : * table, leaving space free, but it's not worth acquiring the LWLock just
828 : * to check. It's also possible that we're acquiring a second or third
829 : * lock type on a relation we have already locked using the fast-path, but
830 : * for now we don't worry about that case either.
831 : */
832 1675466 : if (EligibleForRelationFastPath(locktag, lockmode) &&
833 796366 : FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
834 : {
835 792814 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
836 : bool acquired;
837 :
838 : /*
839 : * LWLockAcquire acts as a memory sequencing point, so it's safe to
840 : * assume that any strong locker whose increment to
841 : * FastPathStrongRelationLocks->count becomes visible after we test
842 : * it has yet to begin to transfer fast-path locks.
843 : */
844 792814 : LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
845 792814 : if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
846 5694 : acquired = false;
847 : else
848 787120 : acquired = FastPathGrantRelationLock(locktag->locktag_field2,
849 : lockmode);
850 792814 : LWLockRelease(&MyProc->backendLock);
851 792814 : if (acquired)
852 : {
853 : /*
854 : * The locallock might contain stale pointers to some old shared
855 : * objects; we MUST reset these to null before considering the
856 : * lock to be acquired via fast-path.
857 : */
858 787120 : locallock->lock = NULL;
859 787120 : locallock->proclock = NULL;
860 787120 : GrantLockLocal(locallock, owner);
861 787120 : return LOCKACQUIRE_OK;
862 : }
863 : }
864 :
865 : /*
866 : * If this lock could potentially have been taken via the fast-path by
867 : * some other backend, we must (temporarily) disable further use of the
868 : * fast-path for this lock tag, and migrate any locks already taken via
869 : * this method to the main lock table.
870 : */
871 91980 : if (ConflictsWithRelationFastPath(locktag, lockmode))
872 : {
873 10966 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
874 :
875 10966 : BeginStrongLockAcquire(locallock, fasthashcode);
876 10966 : if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
877 : hashcode))
878 : {
879 0 : AbortStrongLockAcquire();
880 0 : if (reportMemoryError)
881 0 : ereport(ERROR,
882 : (errcode(ERRCODE_OUT_OF_MEMORY),
883 : errmsg("out of shared memory"),
884 : errhint("You might need to increase max_locks_per_transaction.")));
885 : else
886 0 : return LOCKACQUIRE_NOT_AVAIL;
887 : }
888 : }
889 :
890 : /*
891 : * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
892 : * take it via the fast-path, either, so we've got to mess with the shared
893 : * lock table.
894 : */
895 91980 : partitionLock = LockHashPartitionLock(hashcode);
896 :
897 91980 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
898 :
899 : /*
900 : * Find or create lock and proclock entries with this tag
901 : *
902 : * Note: if the locallock object already existed, it might have a pointer
903 : * to the lock already ... but we should not assume that that pointer is
904 : * valid, since a lock object with zero hold and request counts can go
905 : * away anytime. So we have to use SetupLockInTable() to recompute the
906 : * lock and proclock pointers, even if they're already set.
907 : */
908 91980 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
909 : hashcode, lockmode);
910 91980 : if (!proclock)
911 : {
912 0 : AbortStrongLockAcquire();
913 0 : LWLockRelease(partitionLock);
914 0 : if (reportMemoryError)
915 0 : ereport(ERROR,
916 : (errcode(ERRCODE_OUT_OF_MEMORY),
917 : errmsg("out of shared memory"),
918 : errhint("You might need to increase max_locks_per_transaction.")));
919 : else
920 0 : return LOCKACQUIRE_NOT_AVAIL;
921 : }
922 91980 : locallock->proclock = proclock;
923 91980 : lock = proclock->tag.myLock;
924 91980 : locallock->lock = lock;
925 :
926 : /*
927 : * If the requested lock conflicts with locks requested by waiters, we must
928 : * join the wait queue. Otherwise, check for conflict with already-held locks.
929 : * (That check is done last because it is the most complex.)
930 : */
931 91980 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
932 14 : status = STATUS_FOUND;
933 : else
934 91966 : status = LockCheckConflicts(lockMethodTable, lockmode,
935 : lock, proclock);
936 :
937 91980 : if (status == STATUS_OK)
938 : {
939 : /* No conflict with held or previously requested locks */
940 91954 : GrantLock(lock, proclock, lockmode);
941 91954 : GrantLockLocal(locallock, owner);
942 : }
943 : else
944 : {
945 26 : Assert(status == STATUS_FOUND);
946 :
947 : /*
948 : * We can't acquire the lock immediately. If caller specified no
949 : * blocking, remove useless table entries and return NOT_AVAIL without
950 : * waiting.
951 : */
952 26 : if (dontWait)
953 : {
954 12 : AbortStrongLockAcquire();
955 12 : if (proclock->holdMask == 0)
956 : {
957 : uint32 proclock_hashcode;
958 :
959 12 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
960 12 : SHMQueueDelete(&proclock->lockLink);
961 12 : SHMQueueDelete(&proclock->procLink);
962 12 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
963 12 : (void *) &(proclock->tag),
964 : proclock_hashcode,
965 : HASH_REMOVE,
966 : NULL))
967 0 : elog(PANIC, "proclock table corrupted");
968 : }
969 : else
970 : PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
971 12 : lock->nRequested--;
972 12 : lock->requested[lockmode]--;
973 : LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
974 12 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
975 12 : Assert(lock->nGranted <= lock->nRequested);
976 12 : LWLockRelease(partitionLock);
977 12 : if (locallock->nLocks == 0)
978 12 : RemoveLocalLock(locallock);
979 12 : return LOCKACQUIRE_NOT_AVAIL;
980 : }
981 :
982 : /*
983 : * Set bitmask of locks this process already holds on this object.
984 : */
985 14 : MyProc->heldLocks = proclock->holdMask;
986 :
987 : /*
988 : * Sleep till someone wakes me up.
989 : */
990 :
991 : TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
992 : locktag->locktag_field2,
993 : locktag->locktag_field3,
994 : locktag->locktag_field4,
995 : locktag->locktag_type,
996 : lockmode);
997 :
998 14 : WaitOnLock(locallock, owner);
999 :
1000 : TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1001 : locktag->locktag_field2,
1002 : locktag->locktag_field3,
1003 : locktag->locktag_field4,
1004 : locktag->locktag_type,
1005 : lockmode);
1006 :
1007 : /*
1008 : * NOTE: do not do any material change of state between here and
1009 : * return. All required changes in locktable state must have been
1010 : * done when the lock was granted to us --- see notes in WaitOnLock.
1011 : */
1012 :
1013 : /*
1014 : * Check the proclock entry status, in case something in the ipc
1015 : * communication doesn't work correctly.
1016 : */
1017 14 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1018 : {
1019 0 : AbortStrongLockAcquire();
1020 : PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1021 : LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1022 : /* Should we retry ? */
1023 0 : LWLockRelease(partitionLock);
1024 0 : elog(ERROR, "LockAcquire failed");
1025 : }
1026 : PROCLOCK_PRINT("LockAcquire: granted", proclock);
1027 : LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1028 : }
1029 :
1030 : /*
1031 : * Lock state is fully up-to-date now; if we error out after this, no
1032 : * special error cleanup is required.
1033 : */
1034 91968 : FinishStrongLockAcquire();
1035 :
1036 91968 : LWLockRelease(partitionLock);
1037 :
1038 : /*
1039 : * Emit a WAL record if acquisition of this lock needs to be replayed in a
1040 : * standby server.
1041 : */
1042 91968 : if (log_lock)
1043 : {
1044 : /*
1045 : * Decode the locktag back to the original values, to avoid sending
1046 : * lots of empty bytes with every message. See lock.h to check how a
1047 : * locktag is defined for LOCKTAG_RELATION
1048 : */
1049 8827 : LogAccessExclusiveLock(locktag->locktag_field1,
1050 : locktag->locktag_field2);
1051 : }
1052 :
1053 91968 : return LOCKACQUIRE_OK;
1054 : }
1055 :
1056 : /*
1057 : * Find or create LOCK and PROCLOCK objects as needed for a new lock
1058 : * request.
1059 : *
1060 : * Returns the PROCLOCK object, or NULL if we failed to create the objects
1061 : * for lack of shared memory.
1062 : *
1063 : * The appropriate partition lock must be held at entry, and will be
1064 : * held at exit.
1065 : */
1066 : static PROCLOCK *
1067 92113 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1068 : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1069 : {
1070 : LOCK *lock;
1071 : PROCLOCK *proclock;
1072 : PROCLOCKTAG proclocktag;
1073 : uint32 proclock_hashcode;
1074 : bool found;
1075 :
1076 : /*
1077 : * Find or create a lock with this tag.
1078 : */
1079 92113 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1080 : (const void *) locktag,
1081 : hashcode,
1082 : HASH_ENTER_NULL,
1083 : &found);
1084 92113 : if (!lock)
1085 0 : return NULL;
1086 :
1087 : /*
1088 : * if it's a new lock object, initialize it
1089 : */
1090 92113 : if (!found)
1091 : {
1092 87106 : lock->grantMask = 0;
1093 87106 : lock->waitMask = 0;
1094 87106 : SHMQueueInit(&(lock->procLocks));
1095 87106 : ProcQueueInit(&(lock->waitProcs));
1096 87106 : lock->nRequested = 0;
1097 87106 : lock->nGranted = 0;
1098 87106 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1099 87106 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1100 : LOCK_PRINT("LockAcquire: new", lock, lockmode);
1101 : }
1102 : else
1103 : {
1104 : LOCK_PRINT("LockAcquire: found", lock, lockmode);
1105 5007 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1106 5007 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1107 5007 : Assert(lock->nGranted <= lock->nRequested);
1108 : }
1109 :
1110 : /*
1111 : * Create the hash key for the proclock table.
1112 : */
1113 92113 : proclocktag.myLock = lock;
1114 92113 : proclocktag.myProc = proc;
1115 :
1116 92113 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1117 :
1118 : /*
1119 : * Find or create a proclock entry with this tag
1120 : */
1121 92113 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1122 : (void *) &proclocktag,
1123 : proclock_hashcode,
1124 : HASH_ENTER_NULL,
1125 : &found);
1126 92113 : if (!proclock)
1127 : {
1128 : /* Oops, not enough shmem for the proclock */
1129 0 : if (lock->nRequested == 0)
1130 : {
1131 : /*
1132 : * There are no other requestors of this lock, so garbage-collect
1133 : * the lock object. We *must* do this to avoid a permanent leak
1134 : * of shared memory, because there won't be anything to cause
1135 : * anyone to release the lock object later.
1136 : */
1137 0 : Assert(SHMQueueEmpty(&(lock->procLocks)));
1138 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
1139 0 : (void *) &(lock->tag),
1140 : hashcode,
1141 : HASH_REMOVE,
1142 : NULL))
1143 0 : elog(PANIC, "lock table corrupted");
1144 : }
1145 0 : return NULL;
1146 : }
1147 :
1148 : /*
1149 : * If new, initialize the new entry
1150 : */
1151 92113 : if (!found)
1152 : {
1153 87870 : uint32 partition = LockHashPartition(hashcode);
1154 :
1155 : /*
1156 : * It might seem unsafe to access proclock->groupLeader without a
1157 : * lock, but it's not really. Either we are initializing a proclock
1158 : * on our own behalf, in which case our group leader isn't changing
1159 : * because the group leader for a process can only ever be changed by
1160 : * the process itself; or else we are transferring a fast-path lock to
1161 : * the main lock table, in which case that process can't change its
1162 : * lock group leader without first releasing all of its locks (and in
1163 : * particular the one we are currently transferring).
1164 : */
1165 175740 : proclock->groupLeader = proc->lockGroupLeader != NULL ?
1166 87870 : proc->lockGroupLeader : proc;
1167 87870 : proclock->holdMask = 0;
1168 87870 : proclock->releaseMask = 0;
1169 : /* Add proclock to appropriate lists */
1170 87870 : SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
1171 87870 : SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
1172 : &proclock->procLink);
1173 : PROCLOCK_PRINT("LockAcquire: new", proclock);
1174 : }
1175 : else
1176 : {
1177 : PROCLOCK_PRINT("LockAcquire: found", proclock);
1178 4243 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
1179 :
1180 : #ifdef CHECK_DEADLOCK_RISK
1181 :
1182 : /*
1183 : * Issue warning if we already hold a lower-level lock on this object
1184 : * and do not hold a lock of the requested level or higher. This
1185 : * indicates a deadlock-prone coding practice (eg, we'd have a
1186 : * deadlock if another backend were following the same code path at
1187 : * about the same time).
1188 : *
1189 : * This is not enabled by default, because it may generate log entries
1190 : * about user-level coding practices that are in fact safe in context.
1191 : * It can be enabled to help find system-level problems.
1192 : *
1193 : * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1194 : * better to use a table. For now, though, this works.
1195 : */
1196 : {
1197 : int i;
1198 :
1199 : for (i = lockMethodTable->numLockModes; i > 0; i--)
1200 : {
1201 : if (proclock->holdMask & LOCKBIT_ON(i))
1202 : {
1203 : if (i >= (int) lockmode)
1204 : break; /* safe: we have a lock >= req level */
1205 : elog(LOG, "deadlock risk: raising lock level"
1206 : " from %s to %s on object %u/%u/%u",
1207 : lockMethodTable->lockModeNames[i],
1208 : lockMethodTable->lockModeNames[lockmode],
1209 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1210 : lock->tag.locktag_field3);
1211 : break;
1212 : }
1213 : }
1214 : }
1215 : #endif /* CHECK_DEADLOCK_RISK */
1216 : }
1217 :
1218 : /*
1219 : * lock->nRequested and lock->requested[] count the total number of
1220 : * requests, whether granted or waiting, so increment those immediately.
1221 : * The other counts don't increment till we get the lock.
1222 : */
1223 92113 : lock->nRequested++;
1224 92113 : lock->requested[lockmode]++;
1225 92113 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1226 :
1227 : /*
1228 : * We shouldn't already hold the desired lock; else locallock table is
1229 : * broken.
1230 : */
1231 92113 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
1232 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
1233 : lockMethodTable->lockModeNames[lockmode],
1234 : lock->tag.locktag_field1, lock->tag.locktag_field2,
1235 : lock->tag.locktag_field3);
1236 :
1237 92113 : return proclock;
1238 : }
1239 :
1240 : /*
1241 : * Subroutine to free a locallock entry
1242 : */
1243 : static void
1244 879100 : RemoveLocalLock(LOCALLOCK *locallock)
1245 : {
1246 : int i;
1247 :
1248 884809 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
1249 : {
1250 5709 : if (locallock->lockOwners[i].owner != NULL)
1251 5707 : ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1252 : }
1253 879100 : locallock->numLockOwners = 0;
1254 879100 : if (locallock->lockOwners != NULL)
1255 879100 : pfree(locallock->lockOwners);
1256 879100 : locallock->lockOwners = NULL;
1257 :
1258 879100 : if (locallock->holdsStrongLockCount)
1259 : {
1260 : uint32 fasthashcode;
1261 :
1262 10963 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1263 :
1264 10963 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1265 10963 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1266 10963 : FastPathStrongRelationLocks->count[fasthashcode]--;
1267 10963 : locallock->holdsStrongLockCount = FALSE;
1268 10963 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1269 : }
1270 :
1271 879100 : if (!hash_search(LockMethodLocalHash,
1272 879100 : (void *) &(locallock->tag),
1273 : HASH_REMOVE, NULL))
1274 0 : elog(WARNING, "locallock table corrupted");
1275 879100 : }
1276 :
1277 : /*
1278 : * LockCheckConflicts -- test whether requested lock conflicts
1279 : * with those already granted
1280 : *
1281 : * Returns STATUS_FOUND if conflict, STATUS_OK if no conflict.
1282 : *
1283 : * NOTES:
1284 : * Here's what makes this complicated: one process's locks don't
1285 : * conflict with one another, no matter what purpose they are held for
1286 : * (eg, session and transaction locks do not conflict). Nor do the locks
1287 : * of one process in a lock group conflict with those of another process in
1288 : * the same group. So, we must subtract off these locks when determining
1289 : * whether the requested new lock conflicts with those already held.
1290 : */
1291 : int
1292 91987 : LockCheckConflicts(LockMethod lockMethodTable,
1293 : LOCKMODE lockmode,
1294 : LOCK *lock,
1295 : PROCLOCK *proclock)
1296 : {
1297 91987 : int numLockModes = lockMethodTable->numLockModes;
1298 : LOCKMASK myLocks;
1299 91987 : int conflictMask = lockMethodTable->conflictTab[lockmode];
1300 : int conflictsRemaining[MAX_LOCKMODES];
1301 91987 : int totalConflictsRemaining = 0;
1302 : int i;
1303 : SHM_QUEUE *procLocks;
1304 : PROCLOCK *otherproclock;
1305 :
1306 : /*
1307 : * first check for global conflicts: If no locks conflict with my request,
1308 : * then I get the lock.
1309 : *
1310 : * Checking for conflict: lock->grantMask represents the types of
1311 : * currently held locks. conflictTable[lockmode] has a bit set for each
1312 : * type of lock that conflicts with request. Bitwise compare tells if
1313 : * there is a conflict.
1314 : */
1315 91987 : if (!(conflictMask & lock->grantMask))
1316 : {
1317 : PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1318 89020 : return STATUS_OK;
1319 : }
1320 :
1321 : /*
1322 : * Rats. Something conflicts. But it could still be my own lock, or a
1323 : * lock held by another member of my locking group. First, figure out how
1324 : * many conflicts remain after subtracting out any locks I hold myself.
1325 : */
1326 2967 : myLocks = proclock->holdMask;
1327 26703 : for (i = 1; i <= numLockModes; i++)
1328 : {
1329 23736 : if ((conflictMask & LOCKBIT_ON(i)) == 0)
1330 : {
1331 13777 : conflictsRemaining[i] = 0;
1332 13777 : continue;
1333 : }
1334 9959 : conflictsRemaining[i] = lock->granted[i];
1335 9959 : if (myLocks & LOCKBIT_ON(i))
1336 3151 : --conflictsRemaining[i];
1337 9959 : totalConflictsRemaining += conflictsRemaining[i];
1338 : }
1339 :
1340 : /* If no conflicts remain, we get the lock. */
1341 2967 : if (totalConflictsRemaining == 0)
1342 : {
1343 : PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1344 2940 : return STATUS_OK;
1345 : }
1346 :
1347 : /* If no group locking, it's definitely a conflict. */
1348 27 : if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1349 : {
1350 12 : Assert(proclock->tag.myProc == MyProc);
1351 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1352 : proclock);
1353 12 : return STATUS_FOUND;
1354 : }
1355 :
1356 : /*
1357 : * Locks held in conflicting modes by members of our own lock group are
1358 : * not real conflicts; we can subtract those out and see if we still have
1359 : * a conflict. This is O(N) in the number of processes holding or
1360 : * awaiting locks on this object. We could improve that by making the
1361 : * shared memory state more complex (and larger) but it doesn't seem worth
1362 : * it.
1363 : */
1364 15 : procLocks = &(lock->procLocks);
1365 15 : otherproclock = (PROCLOCK *)
1366 : SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1367 55 : while (otherproclock != NULL)
1368 : {
1369 59 : if (proclock != otherproclock &&
1370 34 : proclock->groupLeader == otherproclock->groupLeader &&
1371 8 : (otherproclock->holdMask & conflictMask) != 0)
1372 : {
1373 8 : int intersectMask = otherproclock->holdMask & conflictMask;
1374 :
1375 72 : for (i = 1; i <= numLockModes; i++)
1376 : {
1377 64 : if ((intersectMask & LOCKBIT_ON(i)) != 0)
1378 : {
1379 8 : if (conflictsRemaining[i] <= 0)
1380 0 : elog(PANIC, "proclocks held do not match lock");
1381 8 : conflictsRemaining[i]--;
1382 8 : totalConflictsRemaining--;
1383 : }
1384 : }
1385 :
1386 8 : if (totalConflictsRemaining == 0)
1387 : {
1388 : PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1389 : proclock);
1390 8 : return STATUS_OK;
1391 : }
1392 : }
1393 25 : otherproclock = (PROCLOCK *)
1394 25 : SHMQueueNext(procLocks, &otherproclock->lockLink,
1395 : offsetof(PROCLOCK, lockLink));
1396 : }
1397 :
1398 : /* Nope, it's a real conflict. */
1399 : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1400 7 : return STATUS_FOUND;
1401 : }
1402 :
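A worked example of the bookkeeping above (a sketch of one common case, not an exhaustive one):

    /*
     * Suppose we already hold RowExclusiveLock on a relation and now request
     * ExclusiveLock on it, with no other holders.  grantMask contains the
     * RowExclusiveLock bit, which is in ExclusiveLock's conflictMask, so the
     * quick grantMask test fails; but conflictsRemaining[RowExclusiveLock] =
     * granted[RowExclusiveLock] (1) - 1 (our own hold) = 0, so
     * totalConflictsRemaining ends up 0 and the result is STATUS_OK.
     */
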
1403 : /*
1404 : * GrantLock -- update the lock and proclock data structures to show
1405 : * the lock request has been granted.
1406 : *
1407 : * NOTE: if proc was blocked, it also needs to be removed from the wait list
1408 : * and have its waitLock/waitProcLock fields cleared. That's not done here.
1409 : *
1410 : * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1411 : * table entry; but since we may be awaking some other process, we can't do
1412 : * that here; it's done by GrantLockLocal, instead.
1413 : */
1414 : void
1415 92101 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1416 : {
1417 92101 : lock->nGranted++;
1418 92101 : lock->granted[lockmode]++;
1419 92101 : lock->grantMask |= LOCKBIT_ON(lockmode);
1420 92101 : if (lock->granted[lockmode] == lock->requested[lockmode])
1421 92077 : lock->waitMask &= LOCKBIT_OFF(lockmode);
1422 92101 : proclock->holdMask |= LOCKBIT_ON(lockmode);
1423 : LOCK_PRINT("GrantLock", lock, lockmode);
1424 92101 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1425 92101 : Assert(lock->nGranted <= lock->nRequested);
1426 92101 : }
1427 :
1428 : /*
1429 : * UnGrantLock -- opposite of GrantLock.
1430 : *
1431 : * Updates the lock and proclock data structures to show that the lock
1432 : * is no longer held nor requested by the current holder.
1433 : *
1434 : * Returns true if there were any waiters waiting on the lock that
1435 : * should now be woken up with ProcLockWakeup.
1436 : */
1437 : static bool
1438 92101 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1439 : PROCLOCK *proclock, LockMethod lockMethodTable)
1440 : {
1441 92101 : bool wakeupNeeded = false;
1442 :
1443 92101 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1444 92101 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1445 92101 : Assert(lock->nGranted <= lock->nRequested);
1446 :
1447 : /*
1448 : * fix the general lock stats
1449 : */
1450 92101 : lock->nRequested--;
1451 92101 : lock->requested[lockmode]--;
1452 92101 : lock->nGranted--;
1453 92101 : lock->granted[lockmode]--;
1454 :
1455 92101 : if (lock->granted[lockmode] == 0)
1456 : {
1457 : /* change the conflict mask. No more of this lock type. */
1458 91475 : lock->grantMask &= LOCKBIT_OFF(lockmode);
1459 : }
1460 :
1461 : LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1462 :
1463 : /*
1464 : * We need only run ProcLockWakeup if the released lock conflicts with at
1465 : * least one of the lock types requested by waiter(s). Otherwise whatever
1466 : * conflict made them wait must still exist. NOTE: before MVCC, we could
1467 : * skip wakeup if lock->granted[lockmode] was still positive. But that's
1468 : * not true anymore, because the remaining granted locks might belong to
1469 : * some waiter, who could now be awakened because he doesn't conflict with
1470 : * his own locks.
1471 : */
1472 92101 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1473 12 : wakeupNeeded = true;
1474 :
1475 : /*
1476 : * Now fix the per-proclock state.
1477 : */
1478 92101 : proclock->holdMask &= LOCKBIT_OFF(lockmode);
1479 : PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1480 :
1481 92101 : return wakeupNeeded;
1482 : }
1483 :
1484 : /*
1485 : * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1486 : * proclock and lock objects if possible, and call ProcLockWakeup if there
1487 : * are remaining requests and the caller says it's OK. (Normally, this
1488 : * should be called after UnGrantLock, and wakeupNeeded is the result from
1489 : * UnGrantLock.)
1490 : *
1491 : * The appropriate partition lock must be held at entry, and will be
1492 : * held at exit.
1493 : */
1494 : static void
1495 90727 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1496 : LockMethod lockMethodTable, uint32 hashcode,
1497 : bool wakeupNeeded)
1498 : {
1499 : /*
1500 : * If this was my last hold on this lock, delete my entry in the proclock
1501 : * table.
1502 : */
1503 90727 : if (proclock->holdMask == 0)
1504 : {
1505 : uint32 proclock_hashcode;
1506 :
1507 : PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1508 87858 : SHMQueueDelete(&proclock->lockLink);
1509 87858 : SHMQueueDelete(&proclock->procLink);
1510 87858 : proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1511 87858 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
1512 87858 : (void *) &(proclock->tag),
1513 : proclock_hashcode,
1514 : HASH_REMOVE,
1515 : NULL))
1516 0 : elog(PANIC, "proclock table corrupted");
1517 : }
1518 :
1519 90727 : if (lock->nRequested == 0)
1520 : {
1521 : /*
1522 : * The caller just released the last lock, so garbage-collect the lock
1523 : * object.
1524 : */
1525 : LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1526 87106 : Assert(SHMQueueEmpty(&(lock->procLocks)));
1527 87106 : if (!hash_search_with_hash_value(LockMethodLockHash,
1528 87106 : (void *) &(lock->tag),
1529 : hashcode,
1530 : HASH_REMOVE,
1531 : NULL))
1532 0 : elog(PANIC, "lock table corrupted");
1533 : }
1534 3621 : else if (wakeupNeeded)
1535 : {
1536 : /* There are waiters on this lock, so wake them up. */
1537 12 : ProcLockWakeup(lockMethodTable, lock);
1538 : }
1539 90727 : }
1540 :
1541 : /*
1542 : * GrantLockLocal -- update the locallock data structures to show
1543 : * the lock request has been granted.
1544 : *
1545 : * We expect that LockAcquire made sure there is room to add a new
1546 : * ResourceOwner entry.
1547 : */
1548 : static void
1549 980535 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1550 : {
1551 980535 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1552 : int i;
1553 :
1554 980535 : Assert(locallock->numLockOwners < locallock->maxLockOwners);
1555 : /* Count the total */
1556 980535 : locallock->nLocks++;
1557 : /* Count the per-owner lock */
1558 1021763 : for (i = 0; i < locallock->numLockOwners; i++)
1559 : {
1560 118087 : if (lockOwners[i].owner == owner)
1561 : {
1562 76859 : lockOwners[i].nLocks++;
1563 1057394 : return;
1564 : }
1565 : }
1566 903676 : lockOwners[i].owner = owner;
1567 903676 : lockOwners[i].nLocks = 1;
1568 903676 : locallock->numLockOwners++;
1569 903676 : if (owner != NULL)
1570 903240 : ResourceOwnerRememberLock(owner, locallock);
1571 : }
1572 :
1573 : /*
1574 : * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1575 : * and arrange for error cleanup if it fails
1576 : */
1577 : static void
1578 10966 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1579 : {
1580 10966 : Assert(StrongLockInProgress == NULL);
1581 10966 : Assert(locallock->holdsStrongLockCount == FALSE);
1582 :
1583 : /*
1584 : * Adding to a memory location is not atomic, so we take a spinlock to
1585 : * ensure we don't collide with someone else trying to bump the count at
1586 : * the same time.
1587 : *
1588 : * XXX: It might be worth considering using an atomic fetch-and-add
1589 : * instruction here, on architectures where that is supported.
1590 : */
1591 :
1592 10966 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1593 10966 : FastPathStrongRelationLocks->count[fasthashcode]++;
1594 10966 : locallock->holdsStrongLockCount = TRUE;
1595 10966 : StrongLockInProgress = locallock;
1596 10966 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1597 10966 : }
1598 :
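/*
 * Editorial sketch (not part of lock.c): the XXX note above suggests an
 * atomic fetch-and-add. Assuming count[] were redeclared as pg_atomic_uint32
 * (today it is a plain uint32 protected by the spinlock), the increment could
 * become something like:
 *
 *     pg_atomic_fetch_add_u32(&FastPathStrongRelationLocks->count[fasthashcode], 1);
 *
 * with a matching pg_atomic_fetch_sub_u32() wherever the count is decremented
 * (e.g. AbortStrongLockAcquire below). This is only an illustration of the
 * idea, not a drop-in change.
 */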
1599 : /*
1600 : * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1601 : * acquisition once it's no longer needed
1602 : */
1603 : static void
1604 91968 : FinishStrongLockAcquire(void)
1605 : {
1606 91968 : StrongLockInProgress = NULL;
1607 91968 : }
1608 :
1609 : /*
1610 : * AbortStrongLockAcquire - undo strong lock state changes performed by
1611 : * BeginStrongLockAcquire.
1612 : */
1613 : void
1614 29861 : AbortStrongLockAcquire(void)
1615 : {
1616 : uint32 fasthashcode;
1617 29861 : LOCALLOCK *locallock = StrongLockInProgress;
1618 :
1619 29861 : if (locallock == NULL)
1620 59722 : return;
1621 :
1622 0 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1623 0 : Assert(locallock->holdsStrongLockCount == TRUE);
1624 0 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1625 0 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1626 0 : FastPathStrongRelationLocks->count[fasthashcode]--;
1627 0 : locallock->holdsStrongLockCount = FALSE;
1628 0 : StrongLockInProgress = NULL;
1629 0 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1630 : }
1631 :
1632 : /*
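/*
 * Editorial note (illustrative only): taken together, the three routines
 * above form a simple protocol around setting up a strong lock in the shared
 * tables:
 *
 *     BeginStrongLockAcquire(locallock, fasthashcode);
 *     ... set up the lock in the shared hash tables ...
 *     FinishStrongLockAcquire();      \* success: keep the bumped count *\
 *
 * If the setup fails partway, AbortStrongLockAcquire() is expected to run
 * during error cleanup so the FastPathStrongRelationLocks count does not
 * leak.
 */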
1633 : * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1634 : * WaitOnLock on.
1635 : *
1636 : * proc.c needs this for the case where we are booted off the lock by
1637 : * timeout, but discover that someone granted us the lock anyway.
1638 : *
1639 : * We could just export GrantLockLocal, but that would require including
1640 : * resowner.h in lock.h, which creates circularity.
1641 : */
1642 : void
1643 14 : GrantAwaitedLock(void)
1644 : {
1645 14 : GrantLockLocal(awaitedLock, awaitedOwner);
1646 14 : }
1647 :
1648 : /*
1649 : * WaitOnLock -- wait to acquire a lock
1650 : *
1651 : * Caller must have set MyProc->heldLocks to reflect locks already held
1652 : * on the lockable object by this process.
1653 : *
1654 : * The appropriate partition lock must be held at entry.
1655 : */
1656 : static void
1657 14 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1658 : {
1659 14 : LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1660 14 : LockMethod lockMethodTable = LockMethods[lockmethodid];
1661 14 : char *volatile new_status = NULL;
1662 :
1663 : LOCK_PRINT("WaitOnLock: sleeping on lock",
1664 : locallock->lock, locallock->tag.mode);
1665 :
1666 : /* Report change to waiting status */
1667 14 : if (update_process_title)
1668 : {
1669 : const char *old_status;
1670 : int len;
1671 :
1672 14 : old_status = get_ps_display(&len);
1673 14 : new_status = (char *) palloc(len + 8 + 1);
1674 14 : memcpy(new_status, old_status, len);
1675 14 : strcpy(new_status + len, " waiting");
1676 14 : set_ps_display(new_status, false);
1677 14 : new_status[len] = '\0'; /* truncate off " waiting" */
1678 : }
1679 :
1680 14 : awaitedLock = locallock;
1681 14 : awaitedOwner = owner;
1682 :
1683 : /*
1684 : * NOTE: Think not to put any shared-state cleanup after the call to
1685 : * ProcSleep, in either the normal or failure path. The lock state must
1686 : * be fully set by the lock grantor, or by CheckDeadLock if we give up
1687 : * waiting for the lock. This is necessary because of the possibility
1688 : * that a cancel/die interrupt will interrupt ProcSleep after someone else
1689 : * grants us the lock, but before we've noticed it. Hence, after granting,
1690 : * the locktable state must fully reflect the fact that we own the lock;
1691 : * we can't do additional work on return.
1692 : *
1693 : * We can and do use a PG_TRY block to try to clean up after failure, but
1694 : * this still has a major limitation: elog(FATAL) can occur while waiting
1695 : * (eg, a "die" interrupt), and then control won't come back here. So all
1696 : * cleanup of essential state should happen in LockErrorCleanup, not here.
1697 : * We can use PG_TRY to clear the "waiting" status flags, since doing that
1698 : * is unimportant if the process exits.
1699 : */
1700 14 : PG_TRY();
1701 : {
1702 14 : if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
1703 : {
1704 : /*
1705 : * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1706 : * now.
1707 : */
1708 0 : awaitedLock = NULL;
1709 : LOCK_PRINT("WaitOnLock: aborting on lock",
1710 : locallock->lock, locallock->tag.mode);
1711 0 : LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1712 :
1713 : /*
1714 : * Now that we aren't holding the partition lock, we can give an
1715 : * error report including details about the detected deadlock.
1716 : */
1717 0 : DeadLockReport();
1718 : /* not reached */
1719 : }
1720 : }
1721 0 : PG_CATCH();
1722 : {
1723 : /* In this path, awaitedLock remains set until LockErrorCleanup */
1724 :
1725 : /* Report change to non-waiting status */
1726 0 : if (update_process_title)
1727 : {
1728 0 : set_ps_display(new_status, false);
1729 0 : pfree(new_status);
1730 : }
1731 :
1732 : /* and propagate the error */
1733 0 : PG_RE_THROW();
1734 : }
1735 14 : PG_END_TRY();
1736 :
1737 14 : awaitedLock = NULL;
1738 :
1739 : /* Report change to non-waiting status */
1740 14 : if (update_process_title)
1741 : {
1742 14 : set_ps_display(new_status, false);
1743 14 : pfree(new_status);
1744 : }
1745 :
1746 : LOCK_PRINT("WaitOnLock: wakeup on lock",
1747 : locallock->lock, locallock->tag.mode);
1748 14 : }
1749 :
1750 : /*
1751 : * Remove a proc from the wait-queue it is on (caller must know it is on one).
1752 : * This is only used when the proc has failed to get the lock, so we set its
1753 : * waitStatus to STATUS_ERROR.
1754 : *
1755 : * Appropriate partition lock must be held by caller. Also, caller is
1756 : * responsible for signaling the proc if needed.
1757 : *
1758 : * NB: this does not clean up any locallock object that may exist for the lock.
1759 : */
1760 : void
1761 0 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1762 : {
1763 0 : LOCK *waitLock = proc->waitLock;
1764 0 : PROCLOCK *proclock = proc->waitProcLock;
1765 0 : LOCKMODE lockmode = proc->waitLockMode;
1766 0 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1767 :
1768 : /* Make sure proc is waiting */
1769 0 : Assert(proc->waitStatus == STATUS_WAITING);
1770 0 : Assert(proc->links.next != NULL);
1771 0 : Assert(waitLock);
1772 0 : Assert(waitLock->waitProcs.size > 0);
1773 0 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1774 :
1775 : /* Remove proc from lock's wait queue */
1776 0 : SHMQueueDelete(&(proc->links));
1777 0 : waitLock->waitProcs.size--;
1778 :
1779 : /* Undo increments of request counts by waiting process */
1780 0 : Assert(waitLock->nRequested > 0);
1781 0 : Assert(waitLock->nRequested > proc->waitLock->nGranted);
1782 0 : waitLock->nRequested--;
1783 0 : Assert(waitLock->requested[lockmode] > 0);
1784 0 : waitLock->requested[lockmode]--;
1785 : /* don't forget to clear waitMask bit if appropriate */
1786 0 : if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1787 0 : waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1788 :
1789 : /* Clean up the proc's own state, and pass it the ok/fail signal */
1790 0 : proc->waitLock = NULL;
1791 0 : proc->waitProcLock = NULL;
1792 0 : proc->waitStatus = STATUS_ERROR;
1793 :
1794 : /*
1795 : * Delete the proclock immediately if it represents no already-held locks.
1796 : * (This must happen now because if the owner of the lock decides to
1797 : * release it, and the requested/granted counts then go to zero,
1798 : * LockRelease expects there to be no remaining proclocks.) Then see if
1799 : * any other waiters for the lock can be woken up now.
1800 : */
1801 0 : CleanUpLock(waitLock, proclock,
1802 : LockMethods[lockmethodid], hashcode,
1803 : true);
1804 0 : }
1805 :
1806 : /*
1807 : * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
1808 : * Release a session lock if 'sessionLock' is true, else release a
1809 : * regular transaction lock.
1810 : *
1811 : * Side Effects: find any waiting processes that are now wakable,
1812 : * grant them their requested locks and awaken them.
1813 : * (We have to grant the lock here to avoid a race between
1814 : * the waking process and any new process to
1815 : * come along and request the lock.)
1816 : */
1817 : bool
1818 844773 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1819 : {
1820 844773 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1821 : LockMethod lockMethodTable;
1822 : LOCALLOCKTAG localtag;
1823 : LOCALLOCK *locallock;
1824 : LOCK *lock;
1825 : PROCLOCK *proclock;
1826 : LWLock *partitionLock;
1827 : bool wakeupNeeded;
1828 :
1829 844773 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1830 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1831 844773 : lockMethodTable = LockMethods[lockmethodid];
1832 844773 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1833 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
1834 :
1835 : #ifdef LOCK_DEBUG
1836 : if (LOCK_DEBUG_ENABLED(locktag))
1837 : elog(LOG, "LockRelease: lock [%u,%u] %s",
1838 : locktag->locktag_field1, locktag->locktag_field2,
1839 : lockMethodTable->lockModeNames[lockmode]);
1840 : #endif
1841 :
1842 : /*
1843 : * Find the LOCALLOCK entry for this lock and lockmode
1844 : */
1845 844773 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1846 844773 : localtag.lock = *locktag;
1847 844773 : localtag.mode = lockmode;
1848 :
1849 844773 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1850 : (void *) &localtag,
1851 : HASH_FIND, NULL);
1852 :
1853 : /*
1854 : * Emit only a WARNING here and let the caller print its own error message, too. Do not ereport(ERROR).
1855 : */
1856 844773 : if (!locallock || locallock->nLocks <= 0)
1857 : {
1858 4 : elog(WARNING, "you don't own a lock of type %s",
1859 : lockMethodTable->lockModeNames[lockmode]);
1860 4 : return FALSE;
1861 : }
1862 :
1863 : /*
1864 : * Decrease the count for the resource owner.
1865 : */
1866 : {
1867 844769 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1868 : ResourceOwner owner;
1869 : int i;
1870 :
1871 : /* Identify owner for lock */
1872 844769 : if (sessionLock)
1873 442 : owner = NULL;
1874 : else
1875 844327 : owner = CurrentResourceOwner;
1876 :
1877 845341 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
1878 : {
1879 845337 : if (lockOwners[i].owner == owner)
1880 : {
1881 844765 : Assert(lockOwners[i].nLocks > 0);
1882 844765 : if (--lockOwners[i].nLocks == 0)
1883 : {
1884 808595 : if (owner != NULL)
1885 808161 : ResourceOwnerForgetLock(owner, locallock);
1886 : /* compact out unused slot */
1887 808595 : locallock->numLockOwners--;
1888 808595 : if (i < locallock->numLockOwners)
1889 11 : lockOwners[i] = lockOwners[locallock->numLockOwners];
1890 : }
1891 844765 : break;
1892 : }
1893 : }
1894 844769 : if (i < 0)
1895 : {
1896 : /* don't release a lock belonging to another owner */
1897 4 : elog(WARNING, "you don't own a lock of type %s",
1898 : lockMethodTable->lockModeNames[lockmode]);
1899 4 : return FALSE;
1900 : }
1901 : }
1902 :
1903 : /*
1904 : * Decrease the total local count. If we're still holding the lock, we're
1905 : * done.
1906 : */
1907 844765 : locallock->nLocks--;
1908 :
1909 844765 : if (locallock->nLocks > 0)
1910 39608 : return TRUE;
1911 :
1912 : /* Attempt fast release of any lock eligible for the fast path. */
1913 1561904 : if (EligibleForRelationFastPath(locktag, lockmode) &&
1914 756747 : FastPathLocalUseCount > 0)
1915 : {
1916 : bool released;
1917 :
1918 : /*
1919 : * We might not find the lock here, even if we originally entered it
1920 : * here. Another backend may have moved it to the main table.
1921 : */
1922 754751 : LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
1923 754751 : released = FastPathUnGrantRelationLock(locktag->locktag_field2,
1924 : lockmode);
1925 754751 : LWLockRelease(&MyProc->backendLock);
1926 754751 : if (released)
1927 : {
1928 748567 : RemoveLocalLock(locallock);
1929 748567 : return TRUE;
1930 : }
1931 : }
1932 :
1933 : /*
1934 : * Otherwise we've got to mess with the shared lock table.
1935 : */
1936 56590 : partitionLock = LockHashPartitionLock(locallock->hashcode);
1937 :
1938 56590 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1939 :
1940 : /*
1941 : * Normally, we don't need to re-find the lock or proclock, since we kept
1942 : * their addresses in the locallock table, and they couldn't have been
1943 : * removed while we were holding a lock on them. But it's possible that
1944 : * the lock was taken fast-path and has since been moved to the main hash
1945 : * table by another backend, in which case we will need to look up the
1946 : * objects here. We assume the lock field is NULL if so.
1947 : */
1948 56590 : lock = locallock->lock;
1949 56590 : if (!lock)
1950 : {
1951 : PROCLOCKTAG proclocktag;
1952 :
1953 0 : Assert(EligibleForRelationFastPath(locktag, lockmode));
1954 0 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1955 : (const void *) locktag,
1956 : locallock->hashcode,
1957 : HASH_FIND,
1958 : NULL);
1959 0 : if (!lock)
1960 0 : elog(ERROR, "failed to re-find shared lock object");
1961 0 : locallock->lock = lock;
1962 :
1963 0 : proclocktag.myLock = lock;
1964 0 : proclocktag.myProc = MyProc;
1965 0 : locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
1966 : (void *) &proclocktag,
1967 : HASH_FIND,
1968 : NULL);
1969 0 : if (!locallock->proclock)
1970 0 : elog(ERROR, "failed to re-find shared proclock object");
1971 : }
1972 : LOCK_PRINT("LockRelease: found", lock, lockmode);
1973 56590 : proclock = locallock->proclock;
1974 : PROCLOCK_PRINT("LockRelease: found", proclock);
1975 :
1976 : /*
1977 : * Double-check that we are actually holding a lock of the type we want to
1978 : * release.
1979 : */
1980 56590 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1981 : {
1982 : PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
1983 0 : LWLockRelease(partitionLock);
1984 0 : elog(WARNING, "you don't own a lock of type %s",
1985 : lockMethodTable->lockModeNames[lockmode]);
1986 0 : RemoveLocalLock(locallock);
1987 0 : return FALSE;
1988 : }
1989 :
1990 : /*
1991 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
1992 : */
1993 56590 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
1994 :
1995 56590 : CleanUpLock(lock, proclock,
1996 : lockMethodTable, locallock->hashcode,
1997 : wakeupNeeded);
1998 :
1999 56590 : LWLockRelease(partitionLock);
2000 :
2001 56590 : RemoveLocalLock(locallock);
2002 56590 : return TRUE;
2003 : }
2004 :
2005 : /*
2006 : * LockReleaseAll -- Release all locks of the specified lock method that
2007 : * are held by the current process.
2008 : *
2009 : * Well, not necessarily *all* locks. The available behaviors are:
2010 : * allLocks == true: release all locks including session locks.
2011 : * allLocks == false: release all non-session locks.
2012 : */
2013 : void
2014 52775 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2015 : {
2016 : HASH_SEQ_STATUS status;
2017 : LockMethod lockMethodTable;
2018 : int i,
2019 : numLockModes;
2020 : LOCALLOCK *locallock;
2021 : LOCK *lock;
2022 : PROCLOCK *proclock;
2023 : int partition;
2024 52775 : bool have_fast_path_lwlock = false;
2025 :
2026 52775 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2027 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2028 52775 : lockMethodTable = LockMethods[lockmethodid];
2029 :
2030 : #ifdef LOCK_DEBUG
2031 : if (*(lockMethodTable->trace_flag))
2032 : elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2033 : #endif
2034 :
2035 : /*
2036 : * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2037 : * the only way that the lock we hold on our own VXID can ever get
2038 : * released: it is always and only released when a toplevel transaction
2039 : * ends.
2040 : */
2041 52775 : if (lockmethodid == DEFAULT_LOCKMETHOD)
2042 26218 : VirtualXactLockTableCleanup();
2043 :
2044 52775 : numLockModes = lockMethodTable->numLockModes;
2045 :
2046 : /*
2047 : * First we run through the locallock table and get rid of unwanted
2048 : * entries, then we scan the process's proclocks and get rid of those. We
2049 : * do this separately because we may have multiple locallock entries
2050 : * pointing to the same proclock, and we daren't end up with any dangling
2051 : * pointers. Fast-path locks are cleaned up during the locallock table
2052 : * scan, though.
2053 : */
2054 52775 : hash_seq_init(&status, LockMethodLocalHash);
2055 :
2056 180591 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2057 : {
2058 : /*
2059 : * If the LOCALLOCK entry is unused, we must've run out of shared
2060 : * memory while trying to set up this lock. Just forget the local
2061 : * entry.
2062 : */
2063 75041 : if (locallock->nLocks == 0)
2064 : {
2065 0 : RemoveLocalLock(locallock);
2066 0 : continue;
2067 : }
2068 :
2069 : /* Ignore items that are not of the lockmethod to be removed */
2070 75041 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2071 571 : continue;
2072 :
2073 : /*
2074 : * If we are asked to release all locks, we can just zap the entry.
2075 : * Otherwise, we must scan to see if there are session locks. We assume
2076 : * there is at most one lockOwners entry for session locks.
2077 : */
2078 74470 : if (!allLocks)
2079 : {
2080 69318 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2081 :
2082 : /* If session lock is above array position 0, move it down to 0 */
2083 139115 : for (i = 0; i < locallock->numLockOwners; i++)
2084 : {
2085 69797 : if (lockOwners[i].owner == NULL)
2086 563 : lockOwners[0] = lockOwners[i];
2087 : else
2088 69234 : ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2089 : }
2090 :
2091 138636 : if (locallock->numLockOwners > 0 &&
2092 69881 : lockOwners[0].owner == NULL &&
2093 563 : lockOwners[0].nLocks > 0)
2094 : {
2095 : /* Fix the locallock to show just the session locks */
2096 563 : locallock->nLocks = lockOwners[0].nLocks;
2097 563 : locallock->numLockOwners = 1;
2098 : /* We aren't deleting this locallock, so done */
2099 563 : continue;
2100 : }
2101 : else
2102 68755 : locallock->numLockOwners = 0;
2103 : }
2104 :
2105 : /*
2106 : * If the lock or proclock pointers are NULL, this lock was taken via
2107 : * the relation fast-path (and is not known to have been transferred).
2108 : */
2109 73907 : if (locallock->proclock == NULL || locallock->lock == NULL)
2110 : {
2111 38545 : LOCKMODE lockmode = locallock->tag.mode;
2112 : Oid relid;
2113 :
2114 : /* Verify that a fast-path lock is what we've got. */
2115 38545 : if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2116 0 : elog(PANIC, "locallock table corrupted");
2117 :
2118 : /*
2119 : * If we don't currently hold the LWLock that protects our
2120 : * fast-path data structures, we must acquire it before attempting
2121 : * to release the lock via the fast-path. We will continue to
2122 : * hold the LWLock until we're done scanning the locallock table,
2123 : * unless we hit a transferred fast-path lock. (XXX is this
2124 : * really such a good idea? There could be a lot of entries ...)
2125 : */
2126 38545 : if (!have_fast_path_lwlock)
2127 : {
2128 12711 : LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
2129 12711 : have_fast_path_lwlock = true;
2130 : }
2131 :
2132 : /* Attempt fast-path release. */
2133 38545 : relid = locallock->tag.lock.locktag_field2;
2134 38545 : if (FastPathUnGrantRelationLock(relid, lockmode))
2135 : {
2136 38420 : RemoveLocalLock(locallock);
2137 38420 : continue;
2138 : }
2139 :
2140 : /*
2141 : * Our lock, originally taken via the fast path, has been
2142 : * transferred to the main lock table. That's going to require
2143 : * some extra work, so release our fast-path lock before starting.
2144 : */
2145 125 : LWLockRelease(&MyProc->backendLock);
2146 125 : have_fast_path_lwlock = false;
2147 :
2148 : /*
2149 : * Now dump the lock. We haven't got a pointer to the LOCK or
2150 : * PROCLOCK in this case, so we have to handle this a bit
2151 : * differently than a normal lock release. Unfortunately, this
2152 : * requires an extra LWLock acquire-and-release cycle on the
2153 : * partitionLock, but hopefully it shouldn't happen often.
2154 : */
2155 125 : LockRefindAndRelease(lockMethodTable, MyProc,
2156 : &locallock->tag.lock, lockmode, false);
2157 125 : RemoveLocalLock(locallock);
2158 125 : continue;
2159 : }
2160 :
2161 : /* Mark the proclock to show we need to release this lockmode */
2162 35362 : if (locallock->nLocks > 0)
2163 35362 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2164 :
2165 : /* And remove the locallock hashtable entry */
2166 35362 : RemoveLocalLock(locallock);
2167 : }
2168 :
2169 : /* Done with the fast-path data structures */
2170 52775 : if (have_fast_path_lwlock)
2171 12586 : LWLockRelease(&MyProc->backendLock);
2172 :
2173 : /*
2174 : * Now, scan each lock partition separately.
2175 : */
2176 897175 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2177 : {
2178 : LWLock *partitionLock;
2179 844400 : SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
2180 : PROCLOCK *nextplock;
2181 :
2182 844400 : partitionLock = LockHashPartitionLockByIndex(partition);
2183 :
2184 : /*
2185 : * If the proclock list for this partition is empty, we can skip
2186 : * acquiring the partition lock. This optimization is trickier than
2187 : * it looks, because another backend could be in process of adding
2188 : * something to our proclock list due to promoting one of our
2189 : * fast-path locks. However, any such lock must be one that we
2190 : * decided not to delete above, so it's okay to skip it again now;
2191 : * we'd just decide not to delete it again. We must, however, be
2192 : * careful to re-fetch the list header once we've acquired the
2193 : * partition lock, to be sure we have a valid, up-to-date pointer.
2194 : * (There is probably no significant risk if pointer fetch/store is
2195 : * atomic, but we don't wish to assume that.)
2196 : *
2197 : * XXX This argument assumes that the locallock table correctly
2198 : * represents all of our fast-path locks. While allLocks mode
2199 : * guarantees to clean up all of our normal locks regardless of the
2200 : * locallock situation, we lose that guarantee for fast-path locks.
2201 : * This is not ideal.
2202 : */
2203 844400 : if (SHMQueueNext(procLocks, procLocks,
2204 : offsetof(PROCLOCK, procLink)) == NULL)
2205 814930 : continue; /* needn't examine this partition */
2206 :
2207 29470 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2208 :
2209 94051 : for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2210 : offsetof(PROCLOCK, procLink));
2211 : proclock;
2212 35111 : proclock = nextplock)
2213 : {
2214 35111 : bool wakeupNeeded = false;
2215 :
2216 : /* Get link first, since we may unlink/delete this proclock */
2217 35111 : nextplock = (PROCLOCK *)
2218 35111 : SHMQueueNext(procLocks, &proclock->procLink,
2219 : offsetof(PROCLOCK, procLink));
2220 :
2221 35111 : Assert(proclock->tag.myProc == MyProc);
2222 :
2223 35111 : lock = proclock->tag.myLock;
2224 :
2225 : /* Ignore items that are not of the lockmethod to be removed */
2226 35111 : if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2227 571 : continue;
2228 :
2229 : /*
2230 : * In allLocks mode, force release of all locks even if the locallock
2231 : * table had problems
2232 : */
2233 34540 : if (allLocks)
2234 2367 : proclock->releaseMask = proclock->holdMask;
2235 : else
2236 32173 : Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2237 :
2238 : /*
2239 : * Ignore items that have nothing to be released, unless they have
2240 : * holdMask == 0 and are therefore recyclable
2241 : */
2242 34540 : if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2243 552 : continue;
2244 :
2245 : PROCLOCK_PRINT("LockReleaseAll", proclock);
2246 : LOCK_PRINT("LockReleaseAll", lock, 0);
2247 33988 : Assert(lock->nRequested >= 0);
2248 33988 : Assert(lock->nGranted >= 0);
2249 33988 : Assert(lock->nGranted <= lock->nRequested);
2250 33988 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
2251 :
2252 : /*
2253 : * Release the previously-marked lock modes
2254 : */
2255 305892 : for (i = 1; i <= numLockModes; i++)
2256 : {
2257 271904 : if (proclock->releaseMask & LOCKBIT_ON(i))
2258 35362 : wakeupNeeded |= UnGrantLock(lock, i, proclock,
2259 : lockMethodTable);
2260 : }
2261 33988 : Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2262 33988 : Assert(lock->nGranted <= lock->nRequested);
2263 : LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2264 :
2265 33988 : proclock->releaseMask = 0;
2266 :
2267 : /* CleanUpLock will wake up waiters if needed. */
2268 67976 : CleanUpLock(lock, proclock,
2269 : lockMethodTable,
2270 33988 : LockTagHashCode(&lock->tag),
2271 : wakeupNeeded);
2272 : } /* loop over PROCLOCKs within this partition */
2273 :
2274 29470 : LWLockRelease(partitionLock);
2275 : } /* loop over partitions */
2276 :
2277 : #ifdef LOCK_DEBUG
2278 : if (*(lockMethodTable->trace_flag))
2279 : elog(LOG, "LockReleaseAll done");
2280 : #endif
2281 52775 : }
2282 :
2283 : /*
2284 : * LockReleaseSession -- Release all session locks of the specified lock method
2285 : * that are held by the current process.
2286 : */
2287 : void
2288 3 : LockReleaseSession(LOCKMETHODID lockmethodid)
2289 : {
2290 : HASH_SEQ_STATUS status;
2291 : LOCALLOCK *locallock;
2292 :
2293 3 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2294 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2295 :
2296 3 : hash_seq_init(&status, LockMethodLocalHash);
2297 :
2298 19 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2299 : {
2300 : /* Ignore items that are not of the specified lock method */
2301 13 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2302 1 : continue;
2303 :
2304 12 : ReleaseLockIfHeld(locallock, true);
2305 : }
2306 3 : }
2307 :
2308 : /*
2309 : * LockReleaseCurrentOwner
2310 : * Release all locks belonging to CurrentResourceOwner
2311 : *
2312 : * If the caller knows what those locks are, it can pass them as an array.
2313 : * That speeds up the call significantly when a lot of locks are held.
2314 : * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2315 : * table to find them.
2316 : */
2317 : void
2318 349 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2319 : {
2320 349 : if (locallocks == NULL)
2321 : {
2322 : HASH_SEQ_STATUS status;
2323 : LOCALLOCK *locallock;
2324 :
2325 0 : hash_seq_init(&status, LockMethodLocalHash);
2326 :
2327 0 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2328 0 : ReleaseLockIfHeld(locallock, false);
2329 : }
2330 : else
2331 : {
2332 : int i;
2333 :
2334 535 : for (i = nlocks - 1; i >= 0; i--)
2335 186 : ReleaseLockIfHeld(locallocks[i], false);
2336 : }
2337 349 : }
2338 :
2339 : /*
2340 : * ReleaseLockIfHeld
2341 : * Release any session-level locks on this lockable object if sessionLock
2342 : * is true; else, release any locks held by CurrentResourceOwner.
2343 : *
2344 : * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2345 : * locks), but without refactoring LockRelease() we cannot support releasing
2346 : * locks belonging to resource owners other than CurrentResourceOwner.
2347 : * If we were to refactor, it'd be a good idea to fix it so we don't have to
2348 : * do a hashtable lookup of the locallock, too. However, currently this
2349 : * function isn't used heavily enough to justify refactoring for its
2350 : * convenience.
2351 : */
2352 : static void
2353 198 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2354 : {
2355 : ResourceOwner owner;
2356 : LOCALLOCKOWNER *lockOwners;
2357 : int i;
2358 :
2359 : /* Identify owner for lock (must match LockRelease!) */
2360 198 : if (sessionLock)
2361 12 : owner = NULL;
2362 : else
2363 186 : owner = CurrentResourceOwner;
2364 :
2365 : /* Scan to see if there are any locks belonging to the target owner */
2366 198 : lockOwners = locallock->lockOwners;
2367 202 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2368 : {
2369 198 : if (lockOwners[i].owner == owner)
2370 : {
2371 194 : Assert(lockOwners[i].nLocks > 0);
2372 194 : if (lockOwners[i].nLocks < locallock->nLocks)
2373 : {
2374 : /*
2375 : * We will still hold this lock after forgetting this
2376 : * ResourceOwner.
2377 : */
2378 63 : locallock->nLocks -= lockOwners[i].nLocks;
2379 : /* compact out unused slot */
2380 63 : locallock->numLockOwners--;
2381 63 : if (owner != NULL)
2382 63 : ResourceOwnerForgetLock(owner, locallock);
2383 63 : if (i < locallock->numLockOwners)
2384 0 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2385 : }
2386 : else
2387 : {
2388 131 : Assert(lockOwners[i].nLocks == locallock->nLocks);
2389 : /* We want to call LockRelease just once */
2390 131 : lockOwners[i].nLocks = 1;
2391 131 : locallock->nLocks = 1;
2392 131 : if (!LockRelease(&locallock->tag.lock,
2393 : locallock->tag.mode,
2394 : sessionLock))
2395 0 : elog(WARNING, "ReleaseLockIfHeld: failed??");
2396 : }
2397 194 : break;
2398 : }
2399 : }
2400 198 : }
2401 :
2402 : /*
2403 : * LockReassignCurrentOwner
2404 : * Reassign all locks belonging to CurrentResourceOwner to belong
2405 : * to its parent resource owner.
2406 : *
2407 : * If the caller knows what those locks are, it can pass them as an array.
2408 : * That speeds up the call significantly when a lot of locks are held
2409 : * (e.g., pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2410 : * and we'll traverse through our hash table to find them.
2411 : */
2412 : void
2413 25233 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2414 : {
2415 25233 : ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2416 :
2417 25233 : Assert(parent != NULL);
2418 :
2419 25233 : if (locallocks == NULL)
2420 : {
2421 : HASH_SEQ_STATUS status;
2422 : LOCALLOCK *locallock;
2423 :
2424 198 : hash_seq_init(&status, LockMethodLocalHash);
2425 :
2426 5624 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2427 5228 : LockReassignOwner(locallock, parent);
2428 : }
2429 : else
2430 : {
2431 : int i;
2432 :
2433 65323 : for (i = nlocks - 1; i >= 0; i--)
2434 40288 : LockReassignOwner(locallocks[i], parent);
2435 : }
2436 25233 : }
2437 :
2438 : /*
2439 : * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2440 : * CurrentResourceOwner to its parent.
2441 : */
2442 : static void
2443 45516 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2444 : {
2445 : LOCALLOCKOWNER *lockOwners;
2446 : int i;
2447 45516 : int ic = -1;
2448 45516 : int ip = -1;
2449 :
2450 : /*
2451 : * Scan to see if there are any locks belonging to current owner or its
2452 : * parent
2453 : */
2454 45516 : lockOwners = locallock->lockOwners;
2455 112741 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2456 : {
2457 67225 : if (lockOwners[i].owner == CurrentResourceOwner)
2458 45274 : ic = i;
2459 21951 : else if (lockOwners[i].owner == parent)
2460 20317 : ip = i;
2461 : }
2462 :
2463 45516 : if (ic < 0)
2464 45758 : return; /* no current locks */
2465 :
2466 45274 : if (ip < 0)
2467 : {
2468 : /* Parent has no slot, so just give it the child's slot */
2469 25199 : lockOwners[ic].owner = parent;
2470 25199 : ResourceOwnerRememberLock(parent, locallock);
2471 : }
2472 : else
2473 : {
2474 : /* Merge child's count with parent's */
2475 20075 : lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2476 : /* compact out unused slot */
2477 20075 : locallock->numLockOwners--;
2478 20075 : if (ic < locallock->numLockOwners)
2479 149 : lockOwners[ic] = lockOwners[locallock->numLockOwners];
2480 : }
2481 45274 : ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2482 : }
2483 :
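/*
 * Editorial example (not part of lock.c): if the current owner's slot holds
 * nLocks = 2 and the parent already has a slot with nLocks = 3, reassignment
 * leaves a single parent slot with nLocks = 5; if the parent had no slot, the
 * child's slot is simply relabeled as the parent's. locallock->nLocks is
 * unchanged either way, since the total number of holds does not change.
 */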
2484 : /*
2485 : * FastPathGrantRelationLock
2486 : * Grant lock using per-backend fast-path array, if there is space.
2487 : */
2488 : static bool
2489 787120 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2490 : {
2491 : uint32 f;
2492 787120 : uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2493 :
2494 : /* Scan for existing entry for this relid, remembering empty slot. */
2495 13311549 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2496 : {
2497 12551423 : if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2498 10976205 : unused_slot = f;
2499 1575218 : else if (MyProc->fpRelId[f] == relid)
2500 : {
2501 26994 : Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2502 26994 : FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2503 26994 : return true;
2504 : }
2505 : }
2506 :
2507 : /* If no existing entry, use any empty slot. */
2508 760126 : if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2509 : {
2510 760126 : MyProc->fpRelId[unused_slot] = relid;
2511 760126 : FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2512 760126 : ++FastPathLocalUseCount;
2513 760126 : return true;
2514 : }
2515 :
2516 : /* No existing entry, and no empty slot. */
2517 0 : return false;
2518 : }
2519 :
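/*
 * Editorial sketch (not part of lock.c): the fast-path grant/ungrant routines
 * are meant to be called with the backend's own fast-path LWLock held, as the
 * release paths above do. A rough sketch of the grant side, assuming the
 * caller has already consulted the strong-lock counts maintained by
 * BeginStrongLockAcquire:
 *
 *     LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
 *     acquired = FastPathGrantRelationLock(relid, lockmode);
 *     LWLockRelease(&MyProc->backendLock);
 */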
2520 : /*
2521 : * FastPathUnGrantRelationLock
2522 : * Release fast-path lock, if present. Update backend-private local
2523 : * use count, while we're at it.
2524 : */
2525 : static bool
2526 793296 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2527 : {
2528 : uint32 f;
2529 793296 : bool result = false;
2530 :
2531 793296 : FastPathLocalUseCount = 0;
2532 13486032 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2533 : {
2534 12692736 : if (MyProc->fpRelId[f] == relid
2535 1088665 : && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2536 : {
2537 786987 : Assert(!result);
2538 786987 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2539 786987 : result = true;
2540 : /* we continue iterating so as to update FastPathLocalUseCount */
2541 : }
2542 12692736 : if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2543 1672723 : ++FastPathLocalUseCount;
2544 : }
2545 793296 : return result;
2546 : }
2547 :
2548 : /*
2549 : * FastPathTransferRelationLocks
2550 : * Transfer locks matching the given lock tag from per-backend fast-path
2551 : * arrays to the shared hash table.
2552 : *
2553 : * Returns true if successful, false if we ran out of shared memory.
2554 : */
2555 : static bool
2556 10966 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2557 : uint32 hashcode)
2558 : {
2559 10966 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
2560 10966 : Oid relid = locktag->locktag_field2;
2561 : uint32 i;
2562 :
2563 : /*
2564 : * Every PGPROC that can potentially hold a fast-path lock is present in
2565 : * ProcGlobal->allProcs. Prepared transactions are not, but any
2566 : * outstanding fast-path locks held by prepared transactions are
2567 : * transferred to the main lock table.
2568 : */
2569 1283022 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2570 : {
2571 1272056 : PGPROC *proc = &ProcGlobal->allProcs[i];
2572 : uint32 f;
2573 :
2574 1272056 : LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE);
2575 :
2576 : /*
2577 : * If the target backend isn't referencing the same database as the
2578 : * lock, then we needn't examine the individual relation IDs at all;
2579 : * none of them can be relevant.
2580 : *
2581 : * proc->databaseId is set at backend startup time and never changes
2582 : * thereafter, so it might be safe to perform this test before
2583 : * acquiring &proc->backendLock. In particular, it's certainly safe
2584 : * to assume that if the target backend holds any fast-path locks, it
2585 : * must have performed a memory-fencing operation (in particular, an
2586 : * LWLock acquisition) since setting proc->databaseId. However, it's
2587 : * less clear that our backend is certain to have performed a memory
2588 : * fencing operation since the other backend set proc->databaseId. So
2589 : * for now, we test it after acquiring the LWLock just to be safe.
2590 : */
2591 1272056 : if (proc->databaseId != locktag->locktag_field1)
2592 : {
2593 1159361 : LWLockRelease(&proc->backendLock);
2594 1159361 : continue;
2595 : }
2596 :
2597 3831344 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2598 : {
2599 : uint32 lockmode;
2600 :
2601 : /* Look for an allocated slot matching the given relid. */
2602 1803095 : if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2603 1802977 : continue;
2604 :
2605 : /* Find or create lock object. */
2606 118 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2607 590 : for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2608 : lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2609 354 : ++lockmode)
2610 : {
2611 : PROCLOCK *proclock;
2612 :
2613 354 : if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2614 229 : continue;
2615 125 : proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2616 : hashcode, lockmode);
2617 125 : if (!proclock)
2618 : {
2619 0 : LWLockRelease(partitionLock);
2620 0 : LWLockRelease(&proc->backendLock);
2621 0 : return false;
2622 : }
2623 125 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2624 125 : FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2625 : }
2626 118 : LWLockRelease(partitionLock);
2627 :
2628 : /* No need to examine remaining slots. */
2629 118 : break;
2630 : }
2631 112695 : LWLockRelease(&proc->backendLock);
2632 : }
2633 10966 : return true;
2634 : }
2635 :
2636 : /*
2637 : * FastPathGetLockEntry
2638 : * Return the PROCLOCK for a lock originally taken via the fast-path,
2639 : * transferring it to the primary lock table if necessary.
2640 : *
2641 : * Note: caller takes care of updating the locallock object.
2642 : */
2643 : static PROCLOCK *
2644 8 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2645 : {
2646 8 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2647 8 : LOCKTAG *locktag = &locallock->tag.lock;
2648 8 : PROCLOCK *proclock = NULL;
2649 8 : LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2650 8 : Oid relid = locktag->locktag_field2;
2651 : uint32 f;
2652 :
2653 8 : LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
2654 :
2655 256 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2656 : {
2657 : uint32 lockmode;
2658 :
2659 : /* Look for an allocated slot matching the given relid. */
2660 128 : if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2661 120 : continue;
2662 :
2663 : /* If we don't have a lock of the given mode, forget it! */
2664 8 : lockmode = locallock->tag.mode;
2665 8 : if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2666 0 : break;
2667 :
2668 : /* Find or create lock object. */
2669 8 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2670 :
2671 8 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2672 : locallock->hashcode, lockmode);
2673 8 : if (!proclock)
2674 : {
2675 0 : LWLockRelease(partitionLock);
2676 0 : LWLockRelease(&MyProc->backendLock);
2677 0 : ereport(ERROR,
2678 : (errcode(ERRCODE_OUT_OF_MEMORY),
2679 : errmsg("out of shared memory"),
2680 : errhint("You might need to increase max_locks_per_transaction.")));
2681 : }
2682 8 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2683 8 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2684 :
2685 8 : LWLockRelease(partitionLock);
2686 :
2687 : /* No need to examine remaining slots. */
2688 8 : break;
2689 : }
2690 :
2691 8 : LWLockRelease(&MyProc->backendLock);
2692 :
2693 : /* Lock may have already been transferred by some other backend. */
2694 8 : if (proclock == NULL)
2695 : {
2696 : LOCK *lock;
2697 : PROCLOCKTAG proclocktag;
2698 : uint32 proclock_hashcode;
2699 :
2700 0 : LWLockAcquire(partitionLock, LW_SHARED);
2701 :
2702 0 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2703 : (void *) locktag,
2704 : locallock->hashcode,
2705 : HASH_FIND,
2706 : NULL);
2707 0 : if (!lock)
2708 0 : elog(ERROR, "failed to re-find shared lock object");
2709 :
2710 0 : proclocktag.myLock = lock;
2711 0 : proclocktag.myProc = MyProc;
2712 :
2713 0 : proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2714 0 : proclock = (PROCLOCK *)
2715 0 : hash_search_with_hash_value(LockMethodProcLockHash,
2716 : (void *) &proclocktag,
2717 : proclock_hashcode,
2718 : HASH_FIND,
2719 : NULL);
2720 0 : if (!proclock)
2721 0 : elog(ERROR, "failed to re-find shared proclock object");
2722 0 : LWLockRelease(partitionLock);
2723 : }
2724 :
2725 8 : return proclock;
2726 : }
2727 :
2728 : /*
2729 : * GetLockConflicts
2730 : * Get an array of VirtualTransactionIds of xacts currently holding locks
2731 : * that would conflict with the specified lock/lockmode.
2732 : * xacts merely awaiting such a lock are NOT reported.
2733 : *
2734 : * The result array is palloc'd and is terminated with an invalid VXID.
2735 : *
2736 : * Of course, the result could be out of date by the time it's returned,
2737 : * so use of this function has to be thought about carefully.
2738 : *
2739 : * Note we never include the current xact's vxid in the result array,
2740 : * since an xact never blocks itself. Also, prepared transactions are
2741 : * ignored, which is a bit more debatable but is appropriate for current
2742 : * uses of the result.
2743 : */
2744 : VirtualTransactionId *
2745 23 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
2746 : {
2747 : static VirtualTransactionId *vxids;
2748 23 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2749 : LockMethod lockMethodTable;
2750 : LOCK *lock;
2751 : LOCKMASK conflictMask;
2752 : SHM_QUEUE *procLocks;
2753 : PROCLOCK *proclock;
2754 : uint32 hashcode;
2755 : LWLock *partitionLock;
2756 23 : int count = 0;
2757 23 : int fast_count = 0;
2758 :
2759 23 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2760 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2761 23 : lockMethodTable = LockMethods[lockmethodid];
2762 23 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2763 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
2764 :
2765 : /*
2766 : * Allocate memory to store results, and fill with InvalidVXID. We only
2767 : * need enough space for MaxBackends + a terminator, since prepared xacts
2768 : * don't count. When InHotStandby, allocate the array once in TopMemoryContext and reuse it.
2769 : */
2770 23 : if (InHotStandby)
2771 : {
2772 0 : if (vxids == NULL)
2773 0 : vxids = (VirtualTransactionId *)
2774 0 : MemoryContextAlloc(TopMemoryContext,
2775 : sizeof(VirtualTransactionId) * (MaxBackends + 1));
2776 : }
2777 : else
2778 23 : vxids = (VirtualTransactionId *)
2779 23 : palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
2780 :
2781 : /* Compute hash code and partition lock, and look up conflicting modes. */
2782 23 : hashcode = LockTagHashCode(locktag);
2783 23 : partitionLock = LockHashPartitionLock(hashcode);
2784 23 : conflictMask = lockMethodTable->conflictTab[lockmode];
2785 :
2786 : /*
2787 : * Fast path locks might not have been entered in the primary lock table.
2788 : * If the lock we're dealing with could conflict with such a lock, we must
2789 : * examine each backend's fast-path array for conflicts.
2790 : */
2791 23 : if (ConflictsWithRelationFastPath(locktag, lockmode))
2792 : {
2793 : int i;
2794 23 : Oid relid = locktag->locktag_field2;
2795 : VirtualTransactionId vxid;
2796 :
2797 : /*
2798 : * Iterate over relevant PGPROCs. Anything held by a prepared
2799 : * transaction will have been transferred to the primary lock table,
2800 : * so we need not worry about those. This is all a bit fuzzy, because
2801 : * new locks could be taken after we've visited a particular
2802 : * partition, but the callers had better be prepared to deal with that
2803 : * anyway, since the locks could equally well be taken between the
2804 : * time we return the value and the time the caller does something
2805 : * with it.
2806 : */
2807 2691 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2808 : {
2809 2668 : PGPROC *proc = &ProcGlobal->allProcs[i];
2810 : uint32 f;
2811 :
2812 : /* A backend never blocks itself */
2813 2668 : if (proc == MyProc)
2814 23 : continue;
2815 :
2816 2645 : LWLockAcquire(&proc->backendLock, LW_SHARED);
2817 :
2818 : /*
2819 : * If the target backend isn't referencing the same database as
2820 : * the lock, then we needn't examine the individual relation IDs
2821 : * at all; none of them can be relevant.
2822 : *
2823 : * See FastPathTransferLocks() for discussion of why we do this
2824 : * test after acquiring the lock.
2825 : */
2826 2645 : if (proc->databaseId != locktag->locktag_field1)
2827 : {
2828 2553 : LWLockRelease(&proc->backendLock);
2829 2553 : continue;
2830 : }
2831 :
2832 1564 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2833 : {
2834 : uint32 lockmask;
2835 :
2836 : /* Look for an allocated slot matching the given relid. */
2837 1472 : if (relid != proc->fpRelId[f])
2838 1472 : continue;
2839 0 : lockmask = FAST_PATH_GET_BITS(proc, f);
2840 0 : if (!lockmask)
2841 0 : continue;
2842 0 : lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2843 :
2844 : /*
2845 : * There can only be one entry per relation, so if we found it
2846 : * and it doesn't conflict, we can skip the rest of the slots.
2847 : */
2848 0 : if ((lockmask & conflictMask) == 0)
2849 0 : break;
2850 :
2851 : /* Conflict! */
2852 0 : GET_VXID_FROM_PGPROC(vxid, *proc);
2853 :
2854 : /*
2855 : * If we see an invalid VXID, then either the xact has already
2856 : * committed (or aborted), or it's a prepared xact. In either
2857 : * case we may ignore it.
2858 : */
2859 0 : if (VirtualTransactionIdIsValid(vxid))
2860 0 : vxids[count++] = vxid;
2861 :
2862 : /* No need to examine remaining slots. */
2863 0 : break;
2864 : }
2865 :
2866 92 : LWLockRelease(&proc->backendLock);
2867 : }
2868 : }
2869 :
2870 : /* Remember how many fast-path conflicts we found. */
2871 23 : fast_count = count;
2872 :
2873 : /*
2874 : * Look up the lock object matching the tag.
2875 : */
2876 23 : LWLockAcquire(partitionLock, LW_SHARED);
2877 :
2878 23 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2879 : (const void *) locktag,
2880 : hashcode,
2881 : HASH_FIND,
2882 : NULL);
2883 23 : if (!lock)
2884 : {
2885 : /*
2886 : * If the lock object doesn't exist, there is nothing holding a lock
2887 : * on this lockable object.
2888 : */
2889 0 : LWLockRelease(partitionLock);
2890 0 : vxids[count].backendId = InvalidBackendId;
2891 0 : vxids[count].localTransactionId = InvalidLocalTransactionId;
2892 0 : return vxids;
2893 : }
2894 :
2895 : /*
2896 : * Examine each existing holder (or awaiter) of the lock.
2897 : */
2898 :
2899 23 : procLocks = &(lock->procLocks);
2900 :
2901 23 : proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2902 : offsetof(PROCLOCK, lockLink));
2903 :
2904 69 : while (proclock)
2905 : {
2906 23 : if (conflictMask & proclock->holdMask)
2907 : {
2908 23 : PGPROC *proc = proclock->tag.myProc;
2909 :
2910 : /* A backend never blocks itself */
2911 23 : if (proc != MyProc)
2912 : {
2913 : VirtualTransactionId vxid;
2914 :
2915 0 : GET_VXID_FROM_PGPROC(vxid, *proc);
2916 :
2917 : /*
2918 : * If we see an invalid VXID, then either the xact has already
2919 : * committed (or aborted), or it's a prepared xact. In either
2920 : * case we may ignore it.
2921 : */
2922 0 : if (VirtualTransactionIdIsValid(vxid))
2923 : {
2924 : int i;
2925 :
2926 : /* Avoid duplicate entries. */
2927 0 : for (i = 0; i < fast_count; ++i)
2928 0 : if (VirtualTransactionIdEquals(vxids[i], vxid))
2929 0 : break;
2930 0 : if (i >= fast_count)
2931 0 : vxids[count++] = vxid;
2932 : }
2933 : }
2934 : }
2935 :
2936 23 : proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
2937 : offsetof(PROCLOCK, lockLink));
2938 : }
2939 :
2940 23 : LWLockRelease(partitionLock);
2941 :
2942 23 : if (count > MaxBackends) /* should never happen */
2943 0 : elog(PANIC, "too many conflicting locks found");
2944 :
2945 23 : vxids[count].backendId = InvalidBackendId;
2946 23 : vxids[count].localTransactionId = InvalidLocalTransactionId;
2947 23 : return vxids;
2948 : }
2949 :
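/*
 * Editorial sketch (not part of lock.c): because the returned array is
 * terminated with an invalid VXID, callers can iterate it like this
 * (locktag and lockmode are whatever the caller passed in):
 *
 *     VirtualTransactionId *vxids = GetLockConflicts(locktag, lockmode);
 *     int         i;
 *
 *     for (i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
 *         ... deal with the conflicting transaction vxids[i] ...
 */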
2950 : /*
2951 : * Find a lock in the shared lock table and release it. It is the caller's
2952 : * responsibility to verify that this is a sane thing to do. (For example, it
2953 : * would be bad to release a lock here if there might still be a LOCALLOCK
2954 : * object with pointers to it.)
2955 : *
2956 : * We currently use this in two situations: first, to release locks held by
2957 : * prepared transactions on commit (see lock_twophase_postcommit); and second,
2958 : * to release locks taken via the fast-path, transferred to the main hash
2959 : * table, and then released (see LockReleaseAll).
2960 : */
2961 : static void
2962 149 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
2963 : LOCKTAG *locktag, LOCKMODE lockmode,
2964 : bool decrement_strong_lock_count)
2965 : {
2966 : LOCK *lock;
2967 : PROCLOCK *proclock;
2968 : PROCLOCKTAG proclocktag;
2969 : uint32 hashcode;
2970 : uint32 proclock_hashcode;
2971 : LWLock *partitionLock;
2972 : bool wakeupNeeded;
2973 :
2974 149 : hashcode = LockTagHashCode(locktag);
2975 149 : partitionLock = LockHashPartitionLock(hashcode);
2976 :
2977 149 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2978 :
2979 : /*
2980 : * Re-find the lock object (it had better be there).
2981 : */
2982 149 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2983 : (void *) locktag,
2984 : hashcode,
2985 : HASH_FIND,
2986 : NULL);
2987 149 : if (!lock)
2988 0 : elog(PANIC, "failed to re-find shared lock object");
2989 :
2990 : /*
2991 : * Re-find the proclock object (ditto).
2992 : */
2993 149 : proclocktag.myLock = lock;
2994 149 : proclocktag.myProc = proc;
2995 :
2996 149 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
2997 :
2998 149 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
2999 : (void *) &proclocktag,
3000 : proclock_hashcode,
3001 : HASH_FIND,
3002 : NULL);
3003 149 : if (!proclock)
3004 0 : elog(PANIC, "failed to re-find shared proclock object");
3005 :
3006 : /*
3007 : * Double-check that we are actually holding a lock of the type we want to
3008 : * release.
3009 : */
3010 149 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3011 : {
3012 : PROCLOCK_PRINT("LockRefindAndRelease: WRONGTYPE", proclock);
3013 0 : LWLockRelease(partitionLock);
3014 0 : elog(WARNING, "you don't own a lock of type %s",
3015 : lockMethodTable->lockModeNames[lockmode]);
3016 149 : return;
3017 : }
3018 :
3019 : /*
3020 : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3021 : */
3022 149 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3023 :
3024 149 : CleanUpLock(lock, proclock,
3025 : lockMethodTable, hashcode,
3026 : wakeupNeeded);
3027 :
3028 149 : LWLockRelease(partitionLock);
3029 :
3030 : /*
3031 : * Decrement strong lock count. This logic is needed only for 2PC.
3032 : */
3033 149 : if (decrement_strong_lock_count
3034 24 : && ConflictsWithRelationFastPath(locktag, lockmode))
3035 : {
3036 3 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3037 :
3038 3 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3039 3 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3040 3 : FastPathStrongRelationLocks->count[fasthashcode]--;
3041 3 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3042 : }
3043 : }
3044 :
3045 : /*
3046 : * AtPrepare_Locks
3047 : * Do the preparatory work for a PREPARE: make 2PC state file records
3048 : * for all locks currently held.
3049 : *
3050 : * Session-level locks are ignored, as are VXID locks.
3051 : *
3052 : * There are some special cases that we error out on: we can't be holding any
3053 : * locks at both session and transaction level (since we must either keep or
3054 : * give away the PROCLOCK object), and we can't be holding any locks on
3055 : * temporary objects (since that would mess up the current backend if it tries
3056 : * to exit before the prepared xact is committed).
3057 : */
3058 : void
3059 6 : AtPrepare_Locks(void)
3060 : {
3061 : HASH_SEQ_STATUS status;
3062 : LOCALLOCK *locallock;
3063 :
3064 : /*
3065 : * For the most part, we don't need to touch shared memory for this ---
3066 : * all the necessary state information is in the locallock table.
3067 : * Fast-path locks are an exception, however: we move any such locks to
3068 : * the main table before allowing PREPARE TRANSACTION to succeed.
3069 : */
3070 6 : hash_seq_init(&status, LockMethodLocalHash);
3071 :
3072 36 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3073 : {
3074 : TwoPhaseLockRecord record;
3075 24 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3076 : bool haveSessionLock;
3077 : bool haveXactLock;
3078 : int i;
3079 :
3080 : /*
3081 : * Ignore VXID locks. We don't want those to be held by prepared
3082 : * transactions, since they aren't meaningful after a restart.
3083 : */
3084 24 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3085 0 : continue;
3086 :
3087 : /* Ignore it if we don't actually hold the lock */
3088 24 : if (locallock->nLocks <= 0)
3089 0 : continue;
3090 :
3091 : /* Scan to see whether we hold it at session or transaction level */
3092 24 : haveSessionLock = haveXactLock = false;
3093 49 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3094 : {
3095 25 : if (lockOwners[i].owner == NULL)
3096 0 : haveSessionLock = true;
3097 : else
3098 25 : haveXactLock = true;
3099 : }
3100 :
3101 : /* Ignore it if we have only session lock */
3102 24 : if (!haveXactLock)
3103 0 : continue;
3104 :
3105 : /*
3106 : * If we have both session- and transaction-level locks, fail. This
3107 : * should never happen with regular locks, since we only take those at
3108 : * session level in some special operations like VACUUM. It's
3109 : * possible to hit this with advisory locks, though.
3110 : *
3111 : * It would be nice if we could keep the session hold and give away
3112 : * the transactional hold to the prepared xact. However, that would
3113 : * require two PROCLOCK objects, and we cannot be sure that another
3114 : * PROCLOCK will be available when it comes time for PostPrepare_Locks
3115 : * to do the deed. So for now, we error out while we can still do so
3116 : * safely.
3117 : */
3118 24 : if (haveSessionLock)
3119 0 : ereport(ERROR,
3120 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3121 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3122 :
3123 : /*
3124 : * If the local lock was taken via the fast-path, we need to move it
3125 : * to the primary lock table, or just get a pointer to the existing
3126 : * primary lock table entry if by chance it's already been
3127 : * transferred.
3128 : */
3129 24 : if (locallock->proclock == NULL)
3130 : {
3131 8 : locallock->proclock = FastPathGetRelationLockEntry(locallock);
3132 8 : locallock->lock = locallock->proclock->tag.myLock;
3133 : }
3134 :
3135 : /*
3136 : * Arrange to not release any strong lock count held by this lock
3137 : * entry. We must retain the count until the prepared transaction is
3138 : * committed or rolled back.
3139 : */
3140 24 : locallock->holdsStrongLockCount = FALSE;
3141 :
3142 : /*
3143 : * Create a 2PC record.
3144 : */
3145 24 : memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3146 24 : record.lockmode = locallock->tag.mode;
3147 :
3148 24 : RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3149 : &record, sizeof(TwoPhaseLockRecord));
3150 : }
3151 6 : }
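/*
 * Editor's note (illustrative, not part of lock.c): judging from the
 * memcpy() and assignment in the loop above, each registered 2PC record is
 * simply the lock's tag plus the held mode, roughly:
 *
 *     typedef struct TwoPhaseLockRecord
 *     {
 *         LOCKTAG    locktag;
 *         LOCKMODE   lockmode;
 *     } TwoPhaseLockRecord;
 *
 * lock_twophase_recover() and lock_twophase_postcommit() further down read
 * these records back to re-acquire or release the locks on behalf of the
 * prepared transaction's dummy PGPROC.
 */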
3152 :
3153 : /*
3154 : * PostPrepare_Locks
3155 : * Clean up after successful PREPARE
3156 : *
3157 : * Here, we want to transfer ownership of our locks to a dummy PGPROC
3158 : * that's now associated with the prepared transaction, and we want to
3159 : * clean out the corresponding entries in the LOCALLOCK table.
3160 : *
3161 : * Note: by removing the LOCALLOCK entries, we are leaving dangling
3162 : * pointers in the transaction's resource owner. This is OK at the
3163 : * moment since resowner.c doesn't try to free locks retail at a toplevel
3164 : * transaction commit or abort. We could alternatively zero out nLocks
3165 : * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3166 : * but that probably costs more cycles.
3167 : */
3168 : void
3169 6 : PostPrepare_Locks(TransactionId xid)
3170 : {
3171 6 : PGPROC *newproc = TwoPhaseGetDummyProc(xid);
3172 : HASH_SEQ_STATUS status;
3173 : LOCALLOCK *locallock;
3174 : LOCK *lock;
3175 : PROCLOCK *proclock;
3176 : PROCLOCKTAG proclocktag;
3177 : int partition;
3178 :
3179 : /* Can't prepare a lock group follower. */
3180 6 : Assert(MyProc->lockGroupLeader == NULL ||
3181 : MyProc->lockGroupLeader == MyProc);
3182 :
3183 : /* This is a critical section: any error means big trouble */
3184 6 : START_CRIT_SECTION();
3185 :
3186 : /*
3187 : * First we run through the locallock table and get rid of unwanted
3188 : * entries, then we scan the process's proclocks and transfer them to the
3189 : * target proc.
3190 : *
3191 : * We do this separately because we may have multiple locallock entries
3192 : * pointing to the same proclock, and we daren't end up with any dangling
3193 : * pointers.
3194 : */
3195 6 : hash_seq_init(&status, LockMethodLocalHash);
3196 :
3197 36 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3198 : {
3199 24 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3200 : bool haveSessionLock;
3201 : bool haveXactLock;
3202 : int i;
3203 :
3204 24 : if (locallock->proclock == NULL || locallock->lock == NULL)
3205 : {
3206 : /*
3207 : * We must've run out of shared memory while trying to set up this
3208 : * lock. Just forget the local entry.
3209 : */
3210 0 : Assert(locallock->nLocks == 0);
3211 0 : RemoveLocalLock(locallock);
3212 0 : continue;
3213 : }
3214 :
3215 : /* Ignore VXID locks */
3216 24 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3217 0 : continue;
3218 :
3219 : /* Scan to see whether we hold it at session or transaction level */
3220 24 : haveSessionLock = haveXactLock = false;
3221 49 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3222 : {
3223 25 : if (lockOwners[i].owner == NULL)
3224 0 : haveSessionLock = true;
3225 : else
3226 25 : haveXactLock = true;
3227 : }
3228 :
3229 : /* Ignore it if we have only session lock */
3230 24 : if (!haveXactLock)
3231 0 : continue;
3232 :
3233 : /* This can't happen, because we already checked it */
3234 24 : if (haveSessionLock)
3235 0 : ereport(PANIC,
3236 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3237 : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3238 :
3239 : /* Mark the proclock to show we need to release this lockmode */
3240 24 : if (locallock->nLocks > 0)
3241 24 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3242 :
3243 : /* And remove the locallock hashtable entry */
3244 24 : RemoveLocalLock(locallock);
3245 : }
3246 :
3247 : /*
3248 : * Now, scan each lock partition separately.
3249 : */
3250 102 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3251 : {
3252 : LWLock *partitionLock;
3253 96 : SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
3254 : PROCLOCK *nextplock;
3255 :
3256 96 : partitionLock = LockHashPartitionLockByIndex(partition);
3257 :
3258 : /*
3259 : * If the proclock list for this partition is empty, we can skip
3260 : * acquiring the partition lock. This optimization is safer than the
3261 : * situation in LockReleaseAll, because we got rid of any fast-path
3262 : * locks during AtPrepare_Locks, so there cannot be any case where
3263 : * another backend is adding something to our lists now. For safety,
3264 : * though, we code this the same way as in LockReleaseAll.
3265 : */
3266 96 : if (SHMQueueNext(procLocks, procLocks,
3267 : offsetof(PROCLOCK, procLink)) == NULL)
3268 80 : continue; /* needn't examine this partition */
3269 :
3270 16 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3271 :
3272 49 : for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3273 : offsetof(PROCLOCK, procLink));
3274 : proclock;
3275 17 : proclock = nextplock)
3276 : {
3277 : /* Get link first, since we may unlink/relink this proclock */
3278 17 : nextplock = (PROCLOCK *)
3279 17 : SHMQueueNext(procLocks, &proclock->procLink,
3280 : offsetof(PROCLOCK, procLink));
3281 :
3282 17 : Assert(proclock->tag.myProc == MyProc);
3283 :
3284 17 : lock = proclock->tag.myLock;
3285 :
3286 : /* Ignore VXID locks */
3287 17 : if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3288 0 : continue;
3289 :
3290 : PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3291 : LOCK_PRINT("PostPrepare_Locks", lock, 0);
3292 17 : Assert(lock->nRequested >= 0);
3293 17 : Assert(lock->nGranted >= 0);
3294 17 : Assert(lock->nGranted <= lock->nRequested);
3295 17 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
3296 :
3297 : /* Ignore it if nothing to release (must be a session lock) */
3298 17 : if (proclock->releaseMask == 0)
3299 0 : continue;
3300 :
3301 : /* Else we should be releasing all locks */
3302 17 : if (proclock->releaseMask != proclock->holdMask)
3303 0 : elog(PANIC, "we seem to have dropped a bit somewhere");
3304 :
3305 : /*
3306 : * We cannot simply modify proclock->tag.myProc to reassign
3307 : * ownership of the lock, because that's part of the hash key and
3308 : * the proclock would then be in the wrong hash chain. Instead
3309 : * use hash_update_hash_key. (We used to create a new hash entry,
3310 : * but that risks out-of-memory failure if other processes are
3311 : * busy making proclocks too.) We must unlink the proclock from
3312 : * our procLink chain and put it into the new proc's chain, too.
3313 : *
3314 : * Note: the updated proclock hash key will still belong to the
3315 : * same hash partition, cf proclock_hash(). So the partition lock
3316 : * we already hold is sufficient for this.
3317 : */
3318 17 : SHMQueueDelete(&proclock->procLink);
3319 :
3320 : /*
3321 : * Create the new hash key for the proclock.
3322 : */
3323 17 : proclocktag.myLock = lock;
3324 17 : proclocktag.myProc = newproc;
3325 :
3326 : /*
3327 : * Update groupLeader pointer to point to the new proc. (We'd
3328 : * better not be a member of somebody else's lock group!)
3329 : */
3330 17 : Assert(proclock->groupLeader == proclock->tag.myProc);
3331 17 : proclock->groupLeader = newproc;
3332 :
3333 : /*
3334 : * Update the proclock. We should not find any existing entry for
3335 : * the same hash key, since there can be only one entry for any
3336 : * given lock with my own proc.
3337 : */
3338 17 : if (!hash_update_hash_key(LockMethodProcLockHash,
3339 : (void *) proclock,
3340 : (void *) &proclocktag))
3341 0 : elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3342 :
3343 : /* Re-link into the new proc's proclock list */
3344 17 : SHMQueueInsertBefore(&(newproc->myProcLocks[partition]),
3345 : &proclock->procLink);
3346 :
3347 : PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3348 : } /* loop over PROCLOCKs within this partition */
3349 :
3350 16 : LWLockRelease(partitionLock);
3351 : } /* loop over partitions */
3352 :
3353 6 : END_CRIT_SECTION();
3354 6 : }
3355 :
3356 :
3357 : /*
3358 : * Estimate shared-memory space used for lock tables
3359 : */
3360 : Size
3361 5 : LockShmemSize(void)
3362 : {
3363 5 : Size size = 0;
3364 : long max_table_size;
3365 :
3366 : /* lock hash table */
3367 5 : max_table_size = NLOCKENTS();
3368 5 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3369 :
3370 : /* proclock hash table */
3371 5 : max_table_size *= 2;
3372 5 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3373 :
3374 : /*
3375 : * Since NLOCKENTS is only an estimate, add 10% safety margin.
3376 : */
3377 5 : size = add_size(size, size / 10);
3378 :
3379 5 : return size;
3380 : }
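/*
 * Editor's sketch (illustrative, not part of lock.c): a worked example of
 * the sizing arithmetic above, under assumed settings. The per-entry byte
 * costs come from hash_estimate_size() at runtime; this only shows the
 * entry counts, and the helper name is invented.
 */
static long
example_lock_table_entries(void)
{
	long		max_locks_per_xact = 64;	/* assumed GUC value */
	long		max_backends = 100;			/* assumed MaxBackends */
	long		max_prepared_xacts = 10;	/* assumed GUC value */
	long		nlockents;

	/* NLOCKENTS(): max_locks_per_xact * (MaxBackends + max_prepared_xacts) */
	nlockents = max_locks_per_xact * (max_backends + max_prepared_xacts);

	/* 7040 LOCK entries, 14080 PROCLOCK entries, plus a 10% safety margin */
	return nlockents;
}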
3381 :
3382 : /*
3383 : * GetLockStatusData - Return a summary of the lock manager's internal
3384 : * status, for use in a user-level reporting function.
3385 : *
3386 : * The return data consists of an array of LockInstanceData objects,
3387 : * which are a lightly abstracted version of the PROCLOCK data structures,
3388 : * i.e. there is one entry for each unique lock and interested PGPROC.
3389 : * It is the caller's responsibility to match up related items (such as
3390 : * references to the same lockable object or PGPROC) if wanted.
3391 : *
3392 : * The design goal is to hold the LWLocks for as short a time as possible;
3393 : * thus, this function simply makes a copy of the necessary data and releases
3394 : * the locks, allowing the caller to contemplate and format the data for as
3395 : * long as it pleases.
3396 : */
3397 : LockData *
3398 52 : GetLockStatusData(void)
3399 : {
3400 : LockData *data;
3401 : PROCLOCK *proclock;
3402 : HASH_SEQ_STATUS seqstat;
3403 : int els;
3404 : int el;
3405 : int i;
3406 :
3407 52 : data = (LockData *) palloc(sizeof(LockData));
3408 :
3409 : /* Guess how much space we'll need. */
3410 52 : els = MaxBackends;
3411 52 : el = 0;
3412 52 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3413 :
3414 : /*
3415 : * First, we iterate through the per-backend fast-path arrays, locking
3416 : * them one at a time. This might produce an inconsistent picture of the
3417 : * system state, but taking all of those LWLocks at the same time seems
3418 : * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3419 : * matter too much, because none of these locks can be involved in lock
3420 : * conflicts anyway - anything that might must be present in the main lock
3421 : * conflicts anyway - anything that might conflict must be present in the main lock
3422 : * completely valid. We cannot safely dereference another backend's
3423 : * lockGroupLeader field without holding all lock partition locks, and
3424 : * it's not worth that.)
3425 : */
3426 6084 : for (i = 0; i < ProcGlobal->allProcCount; ++i)
3427 : {
3428 6032 : PGPROC *proc = &ProcGlobal->allProcs[i];
3429 : uint32 f;
3430 :
3431 6032 : LWLockAcquire(&proc->backendLock, LW_SHARED);
3432 :
3433 102544 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3434 : {
3435 : LockInstanceData *instance;
3436 96512 : uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3437 :
3438 : /* Skip unallocated slots. */
3439 96512 : if (!lockbits)
3440 95843 : continue;
3441 :
3442 669 : if (el >= els)
3443 : {
3444 0 : els += MaxBackends;
3445 0 : data->locks = (LockInstanceData *)
3446 0 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3447 : }
3448 :
3449 669 : instance = &data->locks[el];
3450 669 : SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3451 : proc->fpRelId[f]);
3452 669 : instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3453 669 : instance->waitLockMode = NoLock;
3454 669 : instance->backend = proc->backendId;
3455 669 : instance->lxid = proc->lxid;
3456 669 : instance->pid = proc->pid;
3457 669 : instance->leaderPid = proc->pid;
3458 669 : instance->fastpath = true;
3459 :
3460 669 : el++;
3461 : }
3462 :
3463 6032 : if (proc->fpVXIDLock)
3464 : {
3465 : VirtualTransactionId vxid;
3466 : LockInstanceData *instance;
3467 :
3468 155 : if (el >= els)
3469 : {
3470 0 : els += MaxBackends;
3471 0 : data->locks = (LockInstanceData *)
3472 0 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3473 : }
3474 :
3475 155 : vxid.backendId = proc->backendId;
3476 155 : vxid.localTransactionId = proc->fpLocalTransactionId;
3477 :
3478 155 : instance = &data->locks[el];
3479 155 : SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3480 155 : instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3481 155 : instance->waitLockMode = NoLock;
3482 155 : instance->backend = proc->backendId;
3483 155 : instance->lxid = proc->lxid;
3484 155 : instance->pid = proc->pid;
3485 155 : instance->leaderPid = proc->pid;
3486 155 : instance->fastpath = true;
3487 :
3488 155 : el++;
3489 : }
3490 :
3491 6032 : LWLockRelease(&proc->backendLock);
3492 : }
3493 :
3494 : /*
3495 : * Next, acquire lock on the entire shared lock data structure. We do
3496 : * this so that, at least for locks in the primary lock table, the state
3497 : * will be self-consistent.
3498 : *
3499 : * Since this is a read-only operation, we take shared instead of
3500 : * exclusive lock. There's not a whole lot of point to this, because all
3501 : * the normal operations require exclusive lock, but it doesn't hurt
3502 : * anything either. It will at least allow two backends to do
3503 : * GetLockStatusData in parallel.
3504 : *
3505 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3506 : */
3507 884 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3508 832 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3509 :
3510 : /* Now we can safely count the number of proclocks */
3511 52 : data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3512 52 : if (data->nelements > els)
3513 : {
3514 0 : els = data->nelements;
3515 0 : data->locks = (LockInstanceData *)
3516 0 : repalloc(data->locks, sizeof(LockInstanceData) * els);
3517 : }
3518 :
3519 : /* Now scan the tables to copy the data */
3520 52 : hash_seq_init(&seqstat, LockMethodProcLockHash);
3521 :
3522 279 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3523 : {
3524 175 : PGPROC *proc = proclock->tag.myProc;
3525 175 : LOCK *lock = proclock->tag.myLock;
3526 175 : LockInstanceData *instance = &data->locks[el];
3527 :
3528 175 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3529 175 : instance->holdMask = proclock->holdMask;
3530 175 : if (proc->waitLock == proclock->tag.myLock)
3531 0 : instance->waitLockMode = proc->waitLockMode;
3532 : else
3533 175 : instance->waitLockMode = NoLock;
3534 175 : instance->backend = proc->backendId;
3535 175 : instance->lxid = proc->lxid;
3536 175 : instance->pid = proc->pid;
3537 175 : instance->leaderPid = proclock->groupLeader->pid;
3538 175 : instance->fastpath = false;
3539 :
3540 175 : el++;
3541 : }
3542 :
3543 : /*
3544 : * And release locks. We do this in reverse order for two reasons: (1)
3545 : * Anyone else who needs more than one of the locks will be trying to lock
3546 : * them in increasing order; we don't want to release the other process
3547 : * until it can get all the locks it needs. (2) This avoids O(N^2)
3548 : * behavior inside LWLockRelease.
3549 : */
3550 936 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3551 832 : LWLockRelease(LockHashPartitionLockByIndex(i));
3552 :
3553 52 : Assert(el == data->nelements);
3554 :
3555 52 : return data;
3556 : }
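/*
 * Editor's sketch (illustrative, not part of lock.c): one way a caller
 * might consume the LockData returned by GetLockStatusData() above. The
 * pg_locks view (pg_lock_status() in lockfuncs.c) does essentially this,
 * formatting each LockInstanceData as one output row; here we merely log a
 * few of the copied fields, and the helper name is invented.
 */
static void
example_log_lock_status(void)
{
	LockData   *lockData = GetLockStatusData();
	int			i;

	for (i = 0; i < lockData->nelements; i++)
	{
		LockInstanceData *instance = &lockData->locks[i];

		elog(LOG, "pid %d: locktag type %u, holdMask %x, fastpath %d",
			 instance->pid,
			 (unsigned int) instance->locktag.locktag_type,
			 (unsigned int) instance->holdMask,
			 (int) instance->fastpath);
	}
}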
3557 :
3558 : /*
3559 : * GetBlockerStatusData - Return a summary of the lock manager's state
3560 : * concerning locks that are blocking the specified PID or any member of
3561 : * the PID's lock group, for use in a user-level reporting function.
3562 : *
3563 : * For each PID within the lock group that is awaiting some heavyweight lock,
3564 : * the return data includes an array of LockInstanceData objects, which are
3565 : * the same data structure used by GetLockStatusData; but unlike that function,
3566 : * this one reports only the PROCLOCKs associated with the lock that that PID
3567 : * is blocked on. (Hence, all the locktags should be the same for any one
3568 : * blocked PID.) In addition, we return an array of the PIDs of those backends
3569 : * that are ahead of the blocked PID in the lock's wait queue. These can be
3570 : * compared with the PIDs in the LockInstanceData objects to determine which
3571 : * waiters are ahead of or behind the blocked PID in the queue.
3572 : *
3573 : * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3574 : * waiting on any heavyweight lock, return empty arrays.
3575 : *
3576 : * The design goal is to hold the LWLocks for as short a time as possible;
3577 : * thus, this function simply makes a copy of the necessary data and releases
3578 : * the locks, allowing the caller to contemplate and format the data for as
3579 : * long as it pleases.
3580 : */
3581 : BlockedProcsData *
3582 0 : GetBlockerStatusData(int blocked_pid)
3583 : {
3584 : BlockedProcsData *data;
3585 : PGPROC *proc;
3586 : int i;
3587 :
3588 0 : data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3589 :
3590 : /*
3591 : * Guess how much space we'll need, and preallocate. Most of the time
3592 : * this will avoid needing to do repalloc while holding the LWLocks. (We
3593 : * assume, but check with an Assert, that MaxBackends is enough entries
3594 : * for the procs[] array; the other two could need enlargement, though.)
3595 : */
3596 0 : data->nprocs = data->nlocks = data->npids = 0;
3597 0 : data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3598 0 : data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3599 0 : data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3600 0 : data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3601 :
3602 : /*
3603 : * In order to search the ProcArray for blocked_pid and assume that that
3604 : * entry won't immediately disappear under us, we must hold ProcArrayLock.
3605 : * In addition, to examine the lock grouping fields of any other backend,
3606 : * we must hold all the hash partition locks. (Only one of those locks is
3607 : * actually relevant for any one lock group, but we can't know which one
3608 : * ahead of time.) It's fairly annoying to hold all those locks
3609 : * throughout this, but it's no worse than GetLockStatusData(), and it
3610 : * does have the advantage that we're guaranteed to return a
3611 : * self-consistent instantaneous state.
3612 : */
3613 0 : LWLockAcquire(ProcArrayLock, LW_SHARED);
3614 :
3615 0 : proc = BackendPidGetProcWithLock(blocked_pid);
3616 :
3617 : /* Nothing to do if it's gone */
3618 0 : if (proc != NULL)
3619 : {
3620 : /*
3621 : * Acquire lock on the entire shared lock data structure. See notes
3622 : * in GetLockStatusData().
3623 : */
3624 0 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3625 0 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3626 :
3627 0 : if (proc->lockGroupLeader == NULL)
3628 : {
3629 : /* Easy case, proc is not a lock group member */
3630 0 : GetSingleProcBlockerStatusData(proc, data);
3631 : }
3632 : else
3633 : {
3634 : /* Examine all procs in proc's lock group */
3635 : dlist_iter iter;
3636 :
3637 0 : dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
3638 : {
3639 : PGPROC *memberProc;
3640 :
3641 0 : memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3642 0 : GetSingleProcBlockerStatusData(memberProc, data);
3643 : }
3644 : }
3645 :
3646 : /*
3647 : * And release locks. See notes in GetLockStatusData().
3648 : */
3649 0 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3650 0 : LWLockRelease(LockHashPartitionLockByIndex(i));
3651 :
3652 0 : Assert(data->nprocs <= data->maxprocs);
3653 : }
3654 :
3655 0 : LWLockRelease(ProcArrayLock);
3656 :
3657 0 : return data;
3658 : }
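/*
 * Editor's sketch (illustrative, not part of lock.c): how a caller might
 * walk the BlockedProcsData returned above. Each procs[] element indexes
 * into the flattened locks[] and waiter_pids[] arrays through
 * first_lock/num_locks and first_waiter/num_waiters; pg_blocking_pids() in
 * lockfuncs.c consumes the data along these lines. The helper name is
 * invented.
 */
static void
example_log_blockers(int blocked_pid)
{
	BlockedProcsData *data = GetBlockerStatusData(blocked_pid);
	int			i,
				j;

	for (i = 0; i < data->nprocs; i++)
	{
		BlockedProcData *bproc = &data->procs[i];

		for (j = 0; j < bproc->num_locks; j++)
		{
			LockInstanceData *instance = &data->locks[bproc->first_lock + j];

			elog(LOG, "pid %d is blocked on a lock involving pid %d",
				 bproc->pid, instance->pid);
		}
		for (j = 0; j < bproc->num_waiters; j++)
			elog(LOG, "pid %d waits in the queue ahead of pid %d",
				 data->waiter_pids[bproc->first_waiter + j], bproc->pid);
	}
}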
3659 :
3660 : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3661 : static void
3662 0 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3663 : {
3664 0 : LOCK *theLock = blocked_proc->waitLock;
3665 : BlockedProcData *bproc;
3666 : SHM_QUEUE *procLocks;
3667 : PROCLOCK *proclock;
3668 : PROC_QUEUE *waitQueue;
3669 : PGPROC *proc;
3670 : int queue_size;
3671 : int i;
3672 :
3673 : /* Nothing to do if this proc is not blocked */
3674 0 : if (theLock == NULL)
3675 0 : return;
3676 :
3677 : /* Set up a procs[] element */
3678 0 : bproc = &data->procs[data->nprocs++];
3679 0 : bproc->pid = blocked_proc->pid;
3680 0 : bproc->first_lock = data->nlocks;
3681 0 : bproc->first_waiter = data->npids;
3682 :
3683 : /*
3684 : * We may ignore the proc's fast-path arrays, since nothing in those could
3685 : * be related to a contended lock.
3686 : */
3687 :
3688 : /* Collect all PROCLOCKs associated with theLock */
3689 0 : procLocks = &(theLock->procLocks);
3690 0 : proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3691 : offsetof(PROCLOCK, lockLink));
3692 0 : while (proclock)
3693 : {
3694 0 : PGPROC *proc = proclock->tag.myProc;
3695 0 : LOCK *lock = proclock->tag.myLock;
3696 : LockInstanceData *instance;
3697 :
3698 0 : if (data->nlocks >= data->maxlocks)
3699 : {
3700 0 : data->maxlocks += MaxBackends;
3701 0 : data->locks = (LockInstanceData *)
3702 0 : repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3703 : }
3704 :
3705 0 : instance = &data->locks[data->nlocks];
3706 0 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3707 0 : instance->holdMask = proclock->holdMask;
3708 0 : if (proc->waitLock == lock)
3709 0 : instance->waitLockMode = proc->waitLockMode;
3710 : else
3711 0 : instance->waitLockMode = NoLock;
3712 0 : instance->backend = proc->backendId;
3713 0 : instance->lxid = proc->lxid;
3714 0 : instance->pid = proc->pid;
3715 0 : instance->leaderPid = proclock->groupLeader->pid;
3716 0 : instance->fastpath = false;
3717 0 : data->nlocks++;
3718 :
3719 0 : proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3720 : offsetof(PROCLOCK, lockLink));
3721 : }
3722 :
3723 : /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3724 0 : waitQueue = &(theLock->waitProcs);
3725 0 : queue_size = waitQueue->size;
3726 :
3727 0 : if (queue_size > data->maxpids - data->npids)
3728 : {
3729 0 : data->maxpids = Max(data->maxpids + MaxBackends,
3730 : data->npids + queue_size);
3731 0 : data->waiter_pids = (int *) repalloc(data->waiter_pids,
3732 0 : sizeof(int) * data->maxpids);
3733 : }
3734 :
3735 : /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3736 0 : proc = (PGPROC *) waitQueue->links.next;
3737 0 : for (i = 0; i < queue_size; i++)
3738 : {
3739 0 : if (proc == blocked_proc)
3740 0 : break;
3741 0 : data->waiter_pids[data->npids++] = proc->pid;
3742 0 : proc = (PGPROC *) proc->links.next;
3743 : }
3744 :
3745 0 : bproc->num_locks = data->nlocks - bproc->first_lock;
3746 0 : bproc->num_waiters = data->npids - bproc->first_waiter;
3747 : }
3748 :
3749 : /*
3750 : * Returns a list of currently held AccessExclusiveLocks, for use by
3751 : * LogStandbySnapshot(). The result is a palloc'd array,
3752 : * with the number of elements returned into *nlocks.
3753 : *
3754 : * XXX This currently takes a lock on all partitions of the lock table,
3755 : * but it's possible to do better. By reference counting locks and storing
3756 : * the value in the ProcArray entry for each backend we could tell if any
3757 : * locks need recording without having to acquire the partition locks and
3758 : * scan the lock table. Whether that's worth the additional overhead
3759 : * is pretty dubious though.
3760 : */
3761 : xl_standby_lock *
3762 13 : GetRunningTransactionLocks(int *nlocks)
3763 : {
3764 : xl_standby_lock *accessExclusiveLocks;
3765 : PROCLOCK *proclock;
3766 : HASH_SEQ_STATUS seqstat;
3767 : int i;
3768 : int index;
3769 : int els;
3770 :
3771 : /*
3772 : * Acquire lock on the entire shared lock data structure.
3773 : *
3774 : * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3775 : */
3776 221 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3777 208 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3778 :
3779 : /* Now we can safely count the number of proclocks */
3780 13 : els = hash_get_num_entries(LockMethodProcLockHash);
3781 :
3782 : /*
3783 : * Allocating enough space for all locks in the lock table is overkill,
3784 : * but it's more convenient and faster than having to enlarge the array.
3785 : */
3786 13 : accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
3787 :
3788 : /* Now scan the tables to copy the data */
3789 13 : hash_seq_init(&seqstat, LockMethodProcLockHash);
3790 :
3791 : /*
3792 : * If lock is a currently granted AccessExclusiveLock then it will have
3793 : * just one proclock holder, so locks are never accessed twice in this
3794 : * particular case. Don't copy this code for use elsewhere because in the
3795 : * general case this will give you duplicate locks when looking at
3796 : * non-exclusive lock types.
3797 : */
3798 13 : index = 0;
3799 72 : while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3800 : {
3801 : /* make sure this definition matches the one used in LockAcquire */
3802 54 : if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
3803 8 : proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
3804 : {
3805 8 : PGPROC *proc = proclock->tag.myProc;
3806 8 : PGXACT *pgxact = &ProcGlobal->allPgXact[proc->pgprocno];
3807 8 : LOCK *lock = proclock->tag.myLock;
3808 8 : TransactionId xid = pgxact->xid;
3809 :
3810 : /*
3811 : * Don't record locks for transactions if we know they have
3812 : * already issued their WAL record for commit but not yet released
3813 : * lock. It is still possible that we see locks held by already
3814 : * complete transactions, if they haven't yet zeroed their xids.
3815 : */
3816 8 : if (!TransactionIdIsValid(xid))
3817 0 : continue;
3818 :
3819 8 : accessExclusiveLocks[index].xid = xid;
3820 8 : accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
3821 8 : accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
3822 :
3823 8 : index++;
3824 : }
3825 : }
3826 :
3827 13 : Assert(index <= els);
3828 :
3829 : /*
3830 : * And release locks. We do this in reverse order for two reasons: (1)
3831 : * Anyone else who needs more than one of the locks will be trying to lock
3832 : * them in increasing order; we don't want to release the other process
3833 : * until it can get all the locks it needs. (2) This avoids O(N^2)
3834 : * behavior inside LWLockRelease.
3835 : */
3836 234 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3837 208 : LWLockRelease(LockHashPartitionLockByIndex(i));
3838 :
3839 13 : *nlocks = index;
3840 13 : return accessExclusiveLocks;
3841 : }
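/*
 * Editor's sketch (illustrative, not part of lock.c): iterating the array
 * returned above. In the server, LogStandbySnapshot() in standby.c uses
 * this list to WAL-log the AccessExclusiveLocks of running transactions;
 * the loop below only reports them, and the helper name is invented.
 */
static void
example_log_running_xact_locks(void)
{
	int			nlocks;
	xl_standby_lock *locks = GetRunningTransactionLocks(&nlocks);
	int			i;

	for (i = 0; i < nlocks; i++)
		elog(LOG, "xid %u holds AccessExclusiveLock on relation %u of database %u",
			 locks[i].xid, locks[i].dbOid, locks[i].relOid);

	pfree(locks);
}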
3842 :
3843 : /* Provide the textual name of any lock mode */
3844 : const char *
3845 1016 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
3846 : {
3847 1016 : Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
3848 1016 : Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
3849 1016 : return LockMethods[lockmethodid]->lockModeNames[mode];
3850 : }
3851 :
3852 : #ifdef LOCK_DEBUG
3853 : /*
3854 : * Dump all locks in the given proc's myProcLocks lists.
3855 : *
3856 : * Caller is responsible for having acquired appropriate LWLocks.
3857 : */
3858 : void
3859 : DumpLocks(PGPROC *proc)
3860 : {
3861 : SHM_QUEUE *procLocks;
3862 : PROCLOCK *proclock;
3863 : LOCK *lock;
3864 : int i;
3865 :
3866 : if (proc == NULL)
3867 : return;
3868 :
3869 : if (proc->waitLock)
3870 : LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
3871 :
3872 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3873 : {
3874 : procLocks = &(proc->myProcLocks[i]);
3875 :
3876 : proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3877 : offsetof(PROCLOCK, procLink));
3878 :
3879 : while (proclock)
3880 : {
3881 : Assert(proclock->tag.myProc == proc);
3882 :
3883 : lock = proclock->tag.myLock;
3884 :
3885 : PROCLOCK_PRINT("DumpLocks", proclock);
3886 : LOCK_PRINT("DumpLocks", lock, 0);
3887 :
3888 : proclock = (PROCLOCK *)
3889 : SHMQueueNext(procLocks, &proclock->procLink,
3890 : offsetof(PROCLOCK, procLink));
3891 : }
3892 : }
3893 : }
3894 :
3895 : /*
3896 : * Dump all lmgr locks.
3897 : *
3898 : * Caller is responsible for having acquired appropriate LWLocks.
3899 : */
3900 : void
3901 : DumpAllLocks(void)
3902 : {
3903 : PGPROC *proc;
3904 : PROCLOCK *proclock;
3905 : LOCK *lock;
3906 : HASH_SEQ_STATUS status;
3907 :
3908 : proc = MyProc;
3909 :
3910 : if (proc && proc->waitLock)
3911 : LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
3912 :
3913 : hash_seq_init(&status, LockMethodProcLockHash);
3914 :
3915 : while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
3916 : {
3917 : PROCLOCK_PRINT("DumpAllLocks", proclock);
3918 :
3919 : lock = proclock->tag.myLock;
3920 : if (lock)
3921 : LOCK_PRINT("DumpAllLocks", lock, 0);
3922 : else
3923 : elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
3924 : }
3925 : }
3926 : #endif /* LOCK_DEBUG */
3927 :
3928 : /*
3929 : * LOCK 2PC resource manager's routines
3930 : */
3931 :
3932 : /*
3933 : * Re-acquire a lock belonging to a transaction that was prepared.
3934 : *
3935 : * Because this function is run at db startup, re-acquiring the locks should
3936 : * never conflict with running transactions because there are none. We
3937 : * assume that the lock state represented by the stored 2PC files is legal.
3938 : *
3939 : * When switching from Hot Standby mode to normal operation, the locks will
3940 : * be already held by the startup process. The locks are acquired for the new
3941 : * procs without checking for conflicts, so we don't get a conflict between the
3942 : * startup process and the dummy procs, even though we will momentarily have
3943 : * a situation where two procs are holding the same AccessExclusiveLock,
3944 : * which isn't normally possible because of the conflict. If we're in standby
3945 : * mode, but a recovery snapshot hasn't been established yet, it's possible
3946 : * that some but not all of the locks are already held by the startup process.
3947 : *
3948 : * This approach is simple, but also a bit dangerous, because if there isn't
3949 : * enough shared memory to acquire the locks, an error will be thrown, which
3950 : * is promoted to FATAL and recovery will abort, bringing down the postmaster.
3951 : * A safer approach would be to transfer the locks like we do in
3952 : * AtPrepare_Locks, but then again, in hot standby mode it's possible for
3953 : * read-only backends to use up all the shared lock memory anyway, so that
3954 : * replaying the WAL record that needs to acquire a lock will throw an error
3955 : * and PANIC anyway.
3956 : */
3957 : void
3958 0 : lock_twophase_recover(TransactionId xid, uint16 info,
3959 : void *recdata, uint32 len)
3960 : {
3961 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
3962 0 : PGPROC *proc = TwoPhaseGetDummyProc(xid);
3963 : LOCKTAG *locktag;
3964 : LOCKMODE lockmode;
3965 : LOCKMETHODID lockmethodid;
3966 : LOCK *lock;
3967 : PROCLOCK *proclock;
3968 : PROCLOCKTAG proclocktag;
3969 : bool found;
3970 : uint32 hashcode;
3971 : uint32 proclock_hashcode;
3972 : int partition;
3973 : LWLock *partitionLock;
3974 : LockMethod lockMethodTable;
3975 :
3976 0 : Assert(len == sizeof(TwoPhaseLockRecord));
3977 0 : locktag = &rec->locktag;
3978 0 : lockmode = rec->lockmode;
3979 0 : lockmethodid = locktag->locktag_lockmethodid;
3980 :
3981 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
3982 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3983 0 : lockMethodTable = LockMethods[lockmethodid];
3984 :
3985 0 : hashcode = LockTagHashCode(locktag);
3986 0 : partition = LockHashPartition(hashcode);
3987 0 : partitionLock = LockHashPartitionLock(hashcode);
3988 :
3989 0 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3990 :
3991 : /*
3992 : * Find or create a lock with this tag.
3993 : */
3994 0 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3995 : (void *) locktag,
3996 : hashcode,
3997 : HASH_ENTER_NULL,
3998 : &found);
3999 0 : if (!lock)
4000 : {
4001 0 : LWLockRelease(partitionLock);
4002 0 : ereport(ERROR,
4003 : (errcode(ERRCODE_OUT_OF_MEMORY),
4004 : errmsg("out of shared memory"),
4005 : errhint("You might need to increase max_locks_per_transaction.")));
4006 : }
4007 :
4008 : /*
4009 : * if it's a new lock object, initialize it
4010 : */
4011 0 : if (!found)
4012 : {
4013 0 : lock->grantMask = 0;
4014 0 : lock->waitMask = 0;
4015 0 : SHMQueueInit(&(lock->procLocks));
4016 0 : ProcQueueInit(&(lock->waitProcs));
4017 0 : lock->nRequested = 0;
4018 0 : lock->nGranted = 0;
4019 0 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4020 0 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4021 : LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4022 : }
4023 : else
4024 : {
4025 : LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4026 0 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4027 0 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4028 0 : Assert(lock->nGranted <= lock->nRequested);
4029 : }
4030 :
4031 : /*
4032 : * Create the hash key for the proclock table.
4033 : */
4034 0 : proclocktag.myLock = lock;
4035 0 : proclocktag.myProc = proc;
4036 :
4037 0 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4038 :
4039 : /*
4040 : * Find or create a proclock entry with this tag
4041 : */
4042 0 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4043 : (void *) &proclocktag,
4044 : proclock_hashcode,
4045 : HASH_ENTER_NULL,
4046 : &found);
4047 0 : if (!proclock)
4048 : {
4049 : /* Oops, not enough shmem for the proclock */
4050 0 : if (lock->nRequested == 0)
4051 : {
4052 : /*
4053 : * There are no other requestors of this lock, so garbage-collect
4054 : * the lock object. We *must* do this to avoid a permanent leak
4055 : * of shared memory, because there won't be anything to cause
4056 : * anyone to release the lock object later.
4057 : */
4058 0 : Assert(SHMQueueEmpty(&(lock->procLocks)));
4059 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
4060 0 : (void *) &(lock->tag),
4061 : hashcode,
4062 : HASH_REMOVE,
4063 : NULL))
4064 0 : elog(PANIC, "lock table corrupted");
4065 : }
4066 0 : LWLockRelease(partitionLock);
4067 0 : ereport(ERROR,
4068 : (errcode(ERRCODE_OUT_OF_MEMORY),
4069 : errmsg("out of shared memory"),
4070 : errhint("You might need to increase max_locks_per_transaction.")));
4071 : }
4072 :
4073 : /*
4074 : * If new, initialize the new entry
4075 : */
4076 0 : if (!found)
4077 : {
4078 0 : Assert(proc->lockGroupLeader == NULL);
4079 0 : proclock->groupLeader = proc;
4080 0 : proclock->holdMask = 0;
4081 0 : proclock->releaseMask = 0;
4082 : /* Add proclock to appropriate lists */
4083 0 : SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
4084 0 : SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
4085 : &proclock->procLink);
4086 : PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4087 : }
4088 : else
4089 : {
4090 : PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4091 0 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
4092 : }
4093 :
4094 : /*
4095 : * lock->nRequested and lock->requested[] count the total number of
4096 : * requests, whether granted or waiting, so increment those immediately.
4097 : */
4098 0 : lock->nRequested++;
4099 0 : lock->requested[lockmode]++;
4100 0 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4101 :
4102 : /*
4103 : * We shouldn't already hold the desired lock.
4104 : */
4105 0 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
4106 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
4107 : lockMethodTable->lockModeNames[lockmode],
4108 : lock->tag.locktag_field1, lock->tag.locktag_field2,
4109 : lock->tag.locktag_field3);
4110 :
4111 : /*
4112 : * We ignore any possible conflicts and just grant ourselves the lock,
4113 : * partly for simplicity and partly to avoid deadlocks when switching
4114 : * from standby to normal mode. See function comment.
4115 : */
4116 0 : GrantLock(lock, proclock, lockmode);
4117 :
4118 : /*
4119 : * Bump strong lock count, to make sure any fast-path lock requests won't
4120 : * be granted without consulting the primary lock table.
4121 : */
4122 0 : if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4123 : {
4124 0 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4125 :
4126 0 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4127 0 : FastPathStrongRelationLocks->count[fasthashcode]++;
4128 0 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4129 : }
4130 :
4131 0 : LWLockRelease(partitionLock);
4132 0 : }
4133 :
4134 : /*
4135 : * Re-acquire a lock belonging to a transaction that was prepared, when
4136 : * starting up into hot standby mode.
4137 : */
4138 : void
4139 0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
4140 : void *recdata, uint32 len)
4141 : {
4142 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4143 : LOCKTAG *locktag;
4144 : LOCKMODE lockmode;
4145 : LOCKMETHODID lockmethodid;
4146 :
4147 0 : Assert(len == sizeof(TwoPhaseLockRecord));
4148 0 : locktag = &rec->locktag;
4149 0 : lockmode = rec->lockmode;
4150 0 : lockmethodid = locktag->locktag_lockmethodid;
4151 :
4152 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4153 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4154 :
4155 0 : if (lockmode == AccessExclusiveLock &&
4156 0 : locktag->locktag_type == LOCKTAG_RELATION)
4157 : {
4158 0 : StandbyAcquireAccessExclusiveLock(xid,
4159 : locktag->locktag_field1 /* dboid */ ,
4160 : locktag->locktag_field2 /* reloid */ );
4161 : }
4162 0 : }
4163 :
4164 :
4165 : /*
4166 : * 2PC processing routine for COMMIT PREPARED case.
4167 : *
4168 : * Find and release the lock indicated by the 2PC record.
4169 : */
4170 : void
4171 24 : lock_twophase_postcommit(TransactionId xid, uint16 info,
4172 : void *recdata, uint32 len)
4173 : {
4174 24 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4175 24 : PGPROC *proc = TwoPhaseGetDummyProc(xid);
4176 : LOCKTAG *locktag;
4177 : LOCKMETHODID lockmethodid;
4178 : LockMethod lockMethodTable;
4179 :
4180 24 : Assert(len == sizeof(TwoPhaseLockRecord));
4181 24 : locktag = &rec->locktag;
4182 24 : lockmethodid = locktag->locktag_lockmethodid;
4183 :
4184 24 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4185 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4186 24 : lockMethodTable = LockMethods[lockmethodid];
4187 :
4188 24 : LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4189 24 : }
4190 :
4191 : /*
4192 : * 2PC processing routine for ROLLBACK PREPARED case.
4193 : *
4194 : * This is actually just the same as the COMMIT case.
4195 : */
4196 : void
4197 9 : lock_twophase_postabort(TransactionId xid, uint16 info,
4198 : void *recdata, uint32 len)
4199 : {
4200 9 : lock_twophase_postcommit(xid, info, recdata, len);
4201 9 : }
4202 :
4203 : /*
4204 : * VirtualXactLockTableInsert
4205 : *
4206 : * Take vxid lock via the fast-path. There can't be any pre-existing
4207 : * lockers, as we haven't advertised this vxid via the ProcArray yet.
4208 : *
4209 : * Since MyProc->fpLocalTransactionId will normally contain the same data
4210 : * as MyProc->lxid, you might wonder if we really need both. The
4211 : * difference is that MyProc->lxid is set and cleared unlocked, and
4212 : * examined by procarray.c, while fpLocalTransactionId is protected by
4213 : * backendLock and is used only by the locking subsystem. Doing it this
4214 : * way makes it easier to verify that there are no funny race conditions.
4215 : *
4216 : * We don't bother recording this lock in the local lock table, since it's
4217 : * only ever released at the end of a transaction. Instead,
4218 : * LockReleaseAll() calls VirtualXactLockTableCleanup().
4219 : */
4220 : void
4221 26218 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
4222 : {
4223 26218 : Assert(VirtualTransactionIdIsValid(vxid));
4224 :
4225 26218 : LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
4226 :
4227 26218 : Assert(MyProc->backendId == vxid.backendId);
4228 26218 : Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4229 26218 : Assert(MyProc->fpVXIDLock == false);
4230 :
4231 26218 : MyProc->fpVXIDLock = true;
4232 26218 : MyProc->fpLocalTransactionId = vxid.localTransactionId;
4233 :
4234 26218 : LWLockRelease(&MyProc->backendLock);
4235 26218 : }
4236 :
4237 : /*
4238 : * VirtualXactLockTableCleanup
4239 : *
4240 : * Check whether a VXID lock has been materialized; if so, release it,
4241 : * unblocking waiters.
4242 : */
4243 : void
4244 26218 : VirtualXactLockTableCleanup(void)
4245 : {
4246 : bool fastpath;
4247 : LocalTransactionId lxid;
4248 :
4249 26218 : Assert(MyProc->backendId != InvalidBackendId);
4250 :
4251 : /*
4252 : * Clean up shared memory state.
4253 : */
4254 26218 : LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
4255 :
4256 26218 : fastpath = MyProc->fpVXIDLock;
4257 26218 : lxid = MyProc->fpLocalTransactionId;
4258 26218 : MyProc->fpVXIDLock = false;
4259 26218 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4260 :
4261 26218 : LWLockRelease(&MyProc->backendLock);
4262 :
4263 : /*
4264 : * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4265 : * that means someone transferred the lock to the main lock table.
4266 : */
4267 26218 : if (!fastpath && LocalTransactionIdIsValid(lxid))
4268 : {
4269 : VirtualTransactionId vxid;
4270 : LOCKTAG locktag;
4271 :
4272 0 : vxid.backendId = MyBackendId;
4273 0 : vxid.localTransactionId = lxid;
4274 0 : SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4275 :
4276 0 : LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4277 : &locktag, ExclusiveLock, false);
4278 : }
4279 26218 : }
4280 :
4281 : /*
4282 : * VirtualXactLock
4283 : *
4284 : * If wait = true, wait until the given VXID has been released, and then
4285 : * return true.
4286 : *
4287 : * If wait = false, just check whether the VXID is still running, and return
4288 : * true or false.
4289 : */
4290 : bool
4291 0 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
4292 : {
4293 : LOCKTAG tag;
4294 : PGPROC *proc;
4295 :
4296 0 : Assert(VirtualTransactionIdIsValid(vxid));
4297 :
4298 0 : SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4299 :
4300 : /*
4301 : * If a lock table entry must be made, this is the PGPROC on whose behalf
4302 : * it must be done. Note that the transaction might end or the PGPROC
4303 : * might be reassigned to a new backend before we get around to examining
4304 : * it, but it doesn't matter. If we find upon examination that the
4305 : * relevant lxid is no longer running here, that's enough to prove that
4306 : * it's no longer running anywhere.
4307 : */
4308 0 : proc = BackendIdGetProc(vxid.backendId);
4309 0 : if (proc == NULL)
4310 0 : return true;
4311 :
4312 : /*
4313 : * We must acquire this lock before checking the backendId and lxid
4314 : * against the ones we're waiting for. The target backend will only set
4315 : * or clear lxid while holding this lock.
4316 : */
4317 0 : LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE);
4318 :
4319 : /* If the transaction has ended, our work here is done. */
4320 0 : if (proc->backendId != vxid.backendId
4321 0 : || proc->fpLocalTransactionId != vxid.localTransactionId)
4322 : {
4323 0 : LWLockRelease(&proc->backendLock);
4324 0 : return true;
4325 : }
4326 :
4327 : /*
4328 : * If we aren't asked to wait, there's no need to set up a lock table
4329 : * entry. The transaction is still in progress, so just return false.
4330 : */
4331 0 : if (!wait)
4332 : {
4333 0 : LWLockRelease(&proc->backendLock);
4334 0 : return false;
4335 : }
4336 :
4337 : /*
4338 : * OK, we're going to need to sleep on the VXID. But first, we must set
4339 : * up the primary lock table entry, if needed (ie, convert the proc's
4340 : * fast-path lock on its VXID to a regular lock).
4341 : */
4342 0 : if (proc->fpVXIDLock)
4343 : {
4344 : PROCLOCK *proclock;
4345 : uint32 hashcode;
4346 : LWLock *partitionLock;
4347 :
4348 0 : hashcode = LockTagHashCode(&tag);
4349 :
4350 0 : partitionLock = LockHashPartitionLock(hashcode);
4351 0 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4352 :
4353 0 : proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4354 : &tag, hashcode, ExclusiveLock);
4355 0 : if (!proclock)
4356 : {
4357 0 : LWLockRelease(partitionLock);
4358 0 : LWLockRelease(&proc->backendLock);
4359 0 : ereport(ERROR,
4360 : (errcode(ERRCODE_OUT_OF_MEMORY),
4361 : errmsg("out of shared memory"),
4362 : errhint("You might need to increase max_locks_per_transaction.")));
4363 : }
4364 0 : GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4365 :
4366 0 : LWLockRelease(partitionLock);
4367 :
4368 0 : proc->fpVXIDLock = false;
4369 : }
4370 :
4371 : /* Done with proc->fpLockBits */
4372 0 : LWLockRelease(&proc->backendLock);
4373 :
4374 : /* Time to wait. */
4375 0 : (void) LockAcquire(&tag, ShareLock, false, false);
4376 :
4377 0 : LockRelease(&tag, ShareLock, false);
4378 0 : return true;
4379 : }
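/*
 * Editor's sketch (illustrative, not part of lock.c): typical no-wait vs.
 * wait usage of VirtualXactLock(). Callers normally obtain the vxid from
 * elsewhere (e.g. GetLockConflicts()); the helper name below is invented.
 */
static void
example_wait_for_vxid(VirtualTransactionId vxid)
{
	if (!VirtualTransactionIdIsValid(vxid))
		return;

	/* Non-blocking probe: true means the transaction has already ended */
	if (VirtualXactLock(vxid, false))
		return;

	/* Block until the vxid's lock is released at transaction end */
	(void) VirtualXactLock(vxid, true);
}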
4380 :
4381 : /*
4382 : * LockWaiterCount
4383 : *
4384 : * Find the number of lock requesters on this locktag
4385 : */
4386 : int
4387 3 : LockWaiterCount(const LOCKTAG *locktag)
4388 : {
4389 3 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4390 : LOCK *lock;
4391 : bool found;
4392 : uint32 hashcode;
4393 : LWLock *partitionLock;
4394 3 : int waiters = 0;
4395 :
4396 3 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4397 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4398 :
4399 3 : hashcode = LockTagHashCode(locktag);
4400 3 : partitionLock = LockHashPartitionLock(hashcode);
4401 3 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4402 :
4403 3 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4404 : (const void *) locktag,
4405 : hashcode,
4406 : HASH_FIND,
4407 : &found);
4408 3 : if (found)
4409 : {
4410 3 : Assert(lock != NULL);
4411 3 : waiters = lock->nRequested;
4412 : }
4413 3 : LWLockRelease(partitionLock);
4414 :
4415 3 : return waiters;
4416 : }
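/*
 * Editor's sketch (illustrative, not part of lock.c): counting requesters
 * of a relation's heavyweight lock. The OIDs are placeholders and the
 * helper name is invented; in the server this function is used, for
 * example, to gauge contention when deciding how aggressively to extend a
 * relation.
 */
static int
example_relation_lock_requesters(Oid dboid, Oid reloid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, dboid, reloid);
	return LockWaiterCount(&tag);
}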