LCOV - code coverage report
Current view: top level - src/backend/storage/lmgr - proc.c (source / functions)
Test: PostgreSQL
Date: 2017-09-29 13:40:31
Coverage:      Hit  Total  Percent
  Lines:       354    553   64.0 %
  Functions:    17     27   63.0 %
Legend: Lines: hit / not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * proc.c
       4             :  *    routines to manage per-process shared memory data structure
       5             :  *
       6             :  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/storage/lmgr/proc.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : /*
      16             :  * Interface (a):
      17             :  *      ProcSleep(), ProcWakeup(),
      18             :  *      ProcQueueAlloc() -- create a shm queue for sleeping processes
      19             :  *      ProcQueueInit() -- create a queue without allocing memory
      20             :  *
      21             :  * Waiting for a lock causes the backend to be put to sleep.  Whoever releases
      22             :  * the lock wakes the process up again (and gives it an error code so it knows
      23             :  * whether it was awoken on an error condition).
      24             :  *
      25             :  * Interface (b):
      26             :  *
      27             :  * ProcReleaseLocks -- frees the locks associated with current transaction
      28             :  *
      29             :  * ProcKill -- destroys the shared memory state (and locks)
      30             :  * associated with the process.
      31             :  */
      32             : #include "postgres.h"
      33             : 
      34             : #include <signal.h>
      35             : #include <unistd.h>
      36             : #include <sys/time.h>
      37             : 
      38             : #include "access/transam.h"
      39             : #include "access/twophase.h"
      40             : #include "access/xact.h"
      41             : #include "miscadmin.h"
      42             : #include "pgstat.h"
      43             : #include "postmaster/autovacuum.h"
      44             : #include "replication/slot.h"
      45             : #include "replication/syncrep.h"
      46             : #include "storage/condition_variable.h"
      47             : #include "storage/standby.h"
      48             : #include "storage/ipc.h"
      49             : #include "storage/lmgr.h"
      50             : #include "storage/pmsignal.h"
      51             : #include "storage/proc.h"
      52             : #include "storage/procarray.h"
      53             : #include "storage/procsignal.h"
      54             : #include "storage/spin.h"
      55             : #include "utils/timeout.h"
      56             : #include "utils/timestamp.h"
      57             : 
      58             : 
      59             : /* GUC variables */
      60             : int         DeadlockTimeout = 1000;
      61             : int         StatementTimeout = 0;
      62             : int         LockTimeout = 0;
      63             : int         IdleInTransactionSessionTimeout = 0;
      64             : bool        log_lock_waits = false;
      65             : 
      66             : /* Pointer to this process's PGPROC and PGXACT structs, if any */
      67             : PGPROC     *MyProc = NULL;
      68             : PGXACT     *MyPgXact = NULL;
      69             : 
      70             : /*
      71             :  * This spinlock protects the freelist of recycled PGPROC structures.
      72             :  * We cannot use an LWLock because the LWLock manager depends on already
      73             :  * having a PGPROC and a wait semaphore!  But these structures are touched
      74             :  * relatively infrequently (only at backend startup or shutdown) and not for
      75             :  * very long, so a spinlock is okay.
      76             :  */
      77             : NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
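
The discipline the comment above describes (hold the spinlock only across a
few shared-memory touches, never across anything that can block) looks like
this in practice; the fragment below is lifted from ProcKill() later in this
file:

    SpinLockAcquire(ProcStructLock);
    /* short critical section: no elog(), no allocation, no I/O */
    ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
    SpinLockRelease(ProcStructLock);
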
      78             : 
      79             : /* Pointers to shared-memory structures */
      80             : PROC_HDR   *ProcGlobal = NULL;
      81             : NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
      82             : PGPROC     *PreparedXactProcs = NULL;
      83             : 
      84             : /* If we are waiting for a lock, this points to the associated LOCALLOCK */
      85             : static LOCALLOCK *lockAwaited = NULL;
      86             : 
      87             : static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
      88             : 
      89             : /* Is a deadlock check pending? */
      90             : static volatile sig_atomic_t got_deadlock_timeout;
      91             : 
      92             : static void RemoveProcFromArray(int code, Datum arg);
      93             : static void ProcKill(int code, Datum arg);
      94             : static void AuxiliaryProcKill(int code, Datum arg);
      95             : static void CheckDeadLock(void);
      96             : 
      97             : 
      98             : /*
      99             :  * Report shared-memory space needed by InitProcGlobal.
     100             :  */
     101             : Size
     102           5 : ProcGlobalShmemSize(void)
     103             : {
     104           5 :     Size        size = 0;
     105             : 
     106             :     /* ProcGlobal */
     107           5 :     size = add_size(size, sizeof(PROC_HDR));
     108             :     /* MyProcs, including autovacuum workers and launcher */
     109           5 :     size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
     110             :     /* AuxiliaryProcs */
     111           5 :     size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGPROC)));
     112             :     /* Prepared xacts */
     113           5 :     size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGPROC)));
     114             :     /* ProcStructLock */
     115           5 :     size = add_size(size, sizeof(slock_t));
     116             : 
     117           5 :     size = add_size(size, mul_size(MaxBackends, sizeof(PGXACT)));
     118           5 :     size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGXACT)));
     119           5 :     size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGXACT)));
     120             : 
     121           5 :     return size;
     122             : }
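
add_size() and mul_size() are overflow-checked Size arithmetic helpers: on
overflow they raise an error instead of silently wrapping, so an impossible
shared-memory request fails cleanly at startup. A minimal stand-alone sketch
of the same checks in plain C (hypothetical names, simplified error handling):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    static size_t
    checked_add(size_t s1, size_t s2)
    {
        size_t      result = s1 + s2;

        /* unsigned addition wraps on overflow, so the sum would shrink */
        if (result < s1)
        {
            fprintf(stderr, "size request overflows size_t\n");
            exit(1);
        }
        return result;
    }

    static size_t
    checked_mul(size_t s1, size_t s2)
    {
        size_t      result = s1 * s2;

        /* dividing the product back must recover s2, unless it overflowed */
        if (s1 != 0 && result / s1 != s2)
        {
            fprintf(stderr, "size request overflows size_t\n");
            exit(1);
        }
        return result;
    }
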
     123             : 
     124             : /*
     125             :  * Report number of semaphores needed by InitProcGlobal.
     126             :  */
     127             : int
     128           5 : ProcGlobalSemas(void)
     129             : {
     130             :     /*
     131             :      * We need a sema per backend (including autovacuum), plus one for each
     132             :      * auxiliary process.
     133             :      */
     134           5 :     return MaxBackends + NUM_AUXILIARY_PROCS;
     135             : }
     136             : 
     137             : /*
     138             :  * InitProcGlobal -
     139             :  *    Initialize the global process table during postmaster or standalone
     140             :  *    backend startup.
     141             :  *
     142             :  *    We also create all the per-process semaphores we will need to support
     143             :  *    the requested number of backends.  We used to allocate semaphores
     144             :  *    only when backends were actually started up, but that is bad because
     145             :  *    it lets Postgres fail under load --- a lot of Unix systems are
     146             :  *    (mis)configured with small limits on the number of semaphores, and
     147             :  *    running out when trying to start another backend is a common failure.
     148             :  *    So, now we grab enough semaphores to support the desired max number
     149             :  *    of backends immediately at initialization --- if the sysadmin has set
     150             :  *    MaxConnections, max_worker_processes, or autovacuum_max_workers higher
     151             :  *    than his kernel will support, he'll find out sooner rather than later.
     152             :  *
     153             :  *    Another reason for creating semaphores here is that the semaphore
     154             :  *    implementation typically requires us to create semaphores in the
     155             :  *    postmaster, not in backends.
     156             :  *
     157             :  * Note: this is NOT called by individual backends under a postmaster,
     158             :  * not even in the EXEC_BACKEND case.  The ProcGlobal and AuxiliaryProcs
     159             :  * pointers must be propagated specially for EXEC_BACKEND operation.
     160             :  */
     161             : void
     162           5 : InitProcGlobal(void)
     163             : {
     164             :     PGPROC     *procs;
     165             :     PGXACT     *pgxacts;
     166             :     int         i,
     167             :                 j;
     168             :     bool        found;
     169           5 :     uint32      TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
     170             : 
     171             :     /* Create the ProcGlobal shared structure */
     172           5 :     ProcGlobal = (PROC_HDR *)
     173           5 :         ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
     174           5 :     Assert(!found);
     175             : 
     176             :     /*
     177             :      * Initialize the data structures.
     178             :      */
     179           5 :     ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
     180           5 :     ProcGlobal->freeProcs = NULL;
     181           5 :     ProcGlobal->autovacFreeProcs = NULL;
     182           5 :     ProcGlobal->bgworkerFreeProcs = NULL;
     183           5 :     ProcGlobal->startupProc = NULL;
     184           5 :     ProcGlobal->startupProcPid = 0;
     185           5 :     ProcGlobal->startupBufferPinWaitBufId = -1;
     186           5 :     ProcGlobal->walwriterLatch = NULL;
     187           5 :     ProcGlobal->checkpointerLatch = NULL;
     188           5 :     pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PGPROCNO);
     189           5 :     pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PGPROCNO);
     190             : 
     191             :     /*
     192             :      * Create and initialize all the PGPROC structures we'll need.  There are
     193             :      * five separate consumers: (1) normal backends, (2) autovacuum workers
     194             :      * and the autovacuum launcher, (3) background workers, (4) auxiliary
     195             :      * processes, and (5) prepared transactions.  Each PGPROC structure is
     196             :      * dedicated to exactly one of these purposes, and they do not move
     197             :      * between groups.
     198             :      */
     199           5 :     procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
     200           5 :     MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
     201           5 :     ProcGlobal->allProcs = procs;
     202             :     /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
     203           5 :     ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
     204             : 
     205             :     /*
     206             :      * Also allocate a separate array of PGXACT structures.  This is separate
     207             :      * from the main PGPROC array so that the most heavily accessed data is
     208             :      * stored contiguously in memory in as few cache lines as possible. This
     209             :      * provides significant performance benefits, especially on a
     210             :      * multiprocessor system.  There is one PGXACT structure for every PGPROC
     211             :      * structure.
     212             :      */
     213           5 :     pgxacts = (PGXACT *) ShmemAlloc(TotalProcs * sizeof(PGXACT));
     214           5 :     MemSet(pgxacts, 0, TotalProcs * sizeof(PGXACT));
     215           5 :     ProcGlobal->allPgXact = pgxacts;
     216             : 
     217         587 :     for (i = 0; i < TotalProcs; i++)
     218             :     {
     219             :         /* Common initialization for all PGPROCs, regardless of type. */
     220             : 
     221             :         /*
     222             :          * Set up per-PGPROC semaphore, latch, and backendLock.  Prepared xact
     223             :          * dummy PGPROCs don't need these, though; they're never associated
     224             :          * with a real process.
     225             :          */
     226         582 :         if (i < MaxBackends + NUM_AUXILIARY_PROCS)
     227             :         {
     228         580 :             procs[i].sem = PGSemaphoreCreate();
     229         580 :             InitSharedLatch(&(procs[i].procLatch));
     230         580 :             LWLockInitialize(&(procs[i].backendLock), LWTRANCHE_PROC);
     231             :         }
     232         582 :         procs[i].pgprocno = i;
     233             : 
     234             :         /*
     235             :          * Newly created PGPROCs for normal backends, autovacuum and bgworkers
     236             :          * must be queued up on the appropriate free list.  Because there can
     237             :          * only ever be a small, fixed number of auxiliary processes, no free
     238             :          * list is used in that case; InitAuxiliaryProcess() instead uses a
     239             :          * linear search.   PGPROCs for prepared transactions are added to a
     240             :          * free list by TwoPhaseShmemInit().
     241             :          */
     242         582 :         if (i < MaxConnections)
     243             :         {
     244             :             /* PGPROC for normal backend, add to freeProcs list */
     245         500 :             procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
     246         500 :             ProcGlobal->freeProcs = &procs[i];
     247         500 :             procs[i].procgloballist = &ProcGlobal->freeProcs;
     248             :         }
     249          82 :         else if (i < MaxConnections + autovacuum_max_workers + 1)
     250             :         {
     251             :             /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
     252          20 :             procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
     253          20 :             ProcGlobal->autovacFreeProcs = &procs[i];
     254          20 :             procs[i].procgloballist = &ProcGlobal->autovacFreeProcs;
     255             :         }
     256          62 :         else if (i < MaxBackends)
     257             :         {
     258             :             /* PGPROC for bgworker, add to bgworkerFreeProcs list */
     259          40 :             procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
     260          40 :             ProcGlobal->bgworkerFreeProcs = &procs[i];
     261          40 :             procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
     262             :         }
     263             : 
     264             :         /* Initialize myProcLocks[] shared memory queues. */
     265        9894 :         for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
     266        9312 :             SHMQueueInit(&(procs[i].myProcLocks[j]));
     267             : 
     268             :         /* Initialize lockGroupMembers list. */
     269         582 :         dlist_init(&procs[i].lockGroupMembers);
     270             :     }
     271             : 
     272             :     /*
     273             :      * Save pointers to the blocks of PGPROC structures reserved for auxiliary
     274             :      * processes and prepared transactions.
     275             :      */
     276           5 :     AuxiliaryProcs = &procs[MaxBackends];
     277           5 :     PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
     278             : 
     279             :     /* Create ProcStructLock spinlock, too */
     280           5 :     ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
     281           5 :     SpinLockInit(ProcStructLock);
     282           5 : }
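
The free lists built in the loop above are intrusive LIFO stacks: each
PGPROC's links.next field is reused to point at the previous list head. A
minimal stand-alone rendering of the push/pop idiom (hypothetical Node type,
locking omitted):

    typedef struct Node
    {
        struct Node *next;
        /* ... payload ... */
    } Node;

    static Node *free_head = NULL;

    /* push: chain the node to the old head, then become the head */
    static void
    freelist_push(Node *n)
    {
        n->next = free_head;
        free_head = n;
    }

    /* pop: take the current head, or NULL if the list is exhausted */
    static Node *
    freelist_pop(void)
    {
        Node       *n = free_head;

        if (n != NULL)
            free_head = n->next;
        return n;
    }

In this file, InitProcess() performs the pop and ProcKill() the push, both
while holding ProcStructLock.
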
     283             : 
     284             : /*
     285             :  * InitProcess -- initialize a per-process data structure for this backend
     286             :  */
     287             : void
     288         338 : InitProcess(void)
     289             : {
     290             :     PGPROC     *volatile *procgloballist;
     291             : 
     292             :     /*
     293             :      * ProcGlobal should be set up already (if we are a backend, we inherit
     294             :      * this by fork() or EXEC_BACKEND mechanism from the postmaster).
     295             :      */
     296         338 :     if (ProcGlobal == NULL)
     297           0 :         elog(PANIC, "proc header uninitialized");
     298             : 
     299         338 :     if (MyProc != NULL)
     300           0 :         elog(ERROR, "you already exist");
     301             : 
     302             :     /* Decide which list should supply our PGPROC. */
     303         338 :     if (IsAnyAutoVacuumProcess())
     304           5 :         procgloballist = &ProcGlobal->autovacFreeProcs;
     305         333 :     else if (IsBackgroundWorker)
     306         116 :         procgloballist = &ProcGlobal->bgworkerFreeProcs;
     307             :     else
     308         217 :         procgloballist = &ProcGlobal->freeProcs;
     309             : 
     310             :     /*
     311             :      * Try to get a proc struct from the appropriate free list.  If this
     312             :      * fails, we must be out of PGPROC structures (not to mention semaphores).
     313             :      *
     314             :      * While we are holding the ProcStructLock, also copy the current shared
     315             :      * estimate of spins_per_delay to local storage.
     316             :      */
     317         338 :     SpinLockAcquire(ProcStructLock);
     318             : 
     319         338 :     set_spins_per_delay(ProcGlobal->spins_per_delay);
     320             : 
     321         338 :     MyProc = *procgloballist;
     322             : 
     323         338 :     if (MyProc != NULL)
     324             :     {
     325         338 :         *procgloballist = (PGPROC *) MyProc->links.next;
     326         338 :         SpinLockRelease(ProcStructLock);
     327             :     }
     328             :     else
     329             :     {
     330             :         /*
     331             :          * If we reach here, all the PGPROCs are in use.  This is one of the
     332             :          * possible places to detect "too many backends", so give the standard
     333             :          * error message.  XXX do we need to give a different failure message
     334             :          * in the autovacuum case?
     335             :          */
     336           0 :         SpinLockRelease(ProcStructLock);
     337           0 :         ereport(FATAL,
     338             :                 (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
     339             :                  errmsg("sorry, too many clients already")));
     340             :     }
     341         338 :     MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
     342             : 
     343             :     /*
     344             :      * Cross-check that the PGPROC is of the type we expect; if this were not
     345             :      * the case, it would get returned to the wrong list.
     346             :      */
     347         338 :     Assert(MyProc->procgloballist == procgloballist);
     348             : 
     349             :     /*
     350             :      * Now that we have a PGPROC, mark ourselves as an active postmaster
     351             :      * child; this is so that the postmaster can detect it if we exit without
     352             :      * cleaning up.  (XXX autovac launcher currently doesn't participate in
     353             :      * this; it probably should.)
     354             :      */
     355         338 :     if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
     356         335 :         MarkPostmasterChildActive();
     357             : 
     358             :     /*
     359             :      * Initialize all fields of MyProc, except for those previously
     360             :      * initialized by InitProcGlobal.
     361             :      */
     362         338 :     SHMQueueElemInit(&(MyProc->links));
     363         338 :     MyProc->waitStatus = STATUS_OK;
     364         338 :     MyProc->lxid = InvalidLocalTransactionId;
     365         338 :     MyProc->fpVXIDLock = false;
     366         338 :     MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
     367         338 :     MyPgXact->xid = InvalidTransactionId;
     368         338 :     MyPgXact->xmin = InvalidTransactionId;
     369         338 :     MyProc->pid = MyProcPid;
     370             :     /* backendId, databaseId and roleId will be filled in later */
     371         338 :     MyProc->backendId = InvalidBackendId;
     372         338 :     MyProc->databaseId = InvalidOid;
     373         338 :     MyProc->roleId = InvalidOid;
     374         338 :     MyProc->isBackgroundWorker = IsBackgroundWorker;
     375         338 :     MyPgXact->delayChkpt = false;
     376         338 :     MyPgXact->vacuumFlags = 0;
     377             :     /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
     378         338 :     if (IsAutoVacuumWorkerProcess())
     379           4 :         MyPgXact->vacuumFlags |= PROC_IS_AUTOVACUUM;
     380         338 :     MyProc->lwWaiting = false;
     381         338 :     MyProc->lwWaitMode = 0;
     382         338 :     MyProc->waitLock = NULL;
     383         338 :     MyProc->waitProcLock = NULL;
     384             : #ifdef USE_ASSERT_CHECKING
     385             :     {
     386             :         int         i;
     387             : 
     388             :         /* Last process should have released all locks. */
     389        5746 :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
     390        5408 :             Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
     391             :     }
     392             : #endif
     393         338 :     MyProc->recoveryConflictPending = false;
     394             : 
     395             :     /* Initialize fields for sync rep */
     396         338 :     MyProc->waitLSN = 0;
     397         338 :     MyProc->syncRepState = SYNC_REP_NOT_WAITING;
     398         338 :     SHMQueueElemInit(&(MyProc->syncRepLinks));
     399             : 
     400             :     /* Initialize fields for group XID clearing. */
     401         338 :     MyProc->procArrayGroupMember = false;
     402         338 :     MyProc->procArrayGroupMemberXid = InvalidTransactionId;
     403         338 :     pg_atomic_init_u32(&MyProc->procArrayGroupNext, INVALID_PGPROCNO);
     404             : 
     405             :     /* Check that group locking fields are in a proper initial state. */
     406         338 :     Assert(MyProc->lockGroupLeader == NULL);
     407         338 :     Assert(dlist_is_empty(&MyProc->lockGroupMembers));
     408             : 
     409             :     /* Initialize wait event information. */
     410         338 :     MyProc->wait_event_info = 0;
     411             : 
     412             :     /* Initialize fields for group transaction status update. */
     413         338 :     MyProc->clogGroupMember = false;
     414         338 :     MyProc->clogGroupMemberXid = InvalidTransactionId;
     415         338 :     MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
     416         338 :     MyProc->clogGroupMemberPage = -1;
     417         338 :     MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
     418         338 :     pg_atomic_init_u32(&MyProc->clogGroupNext, INVALID_PGPROCNO);
     419             : 
     420             :     /*
     421             :      * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
     422             :      * on it.  That allows us to repoint the process latch, which so far
     423             :      * points to the process-local one, to the shared one.
     424             :      */
     425         338 :     OwnLatch(&MyProc->procLatch);
     426         338 :     SwitchToSharedLatch();
     427             : 
     428             :     /*
     429             :      * We might be reusing a semaphore that belonged to a failed process. So
     430             :      * be careful and reinitialize its value here.  (This is not strictly
     431             :      * necessary anymore, but seems like a good idea for cleanliness.)
     432             :      */
     433         338 :     PGSemaphoreReset(MyProc->sem);
     434             : 
     435             :     /*
     436             :      * Arrange to clean up at backend exit.
     437             :      */
     438         338 :     on_shmem_exit(ProcKill, 0);
     439             : 
     440             :     /*
     441             :      * Now that we have a PGPROC, we could try to acquire locks, so initialize
     442             :      * local state needed for LWLocks, and the deadlock checker.
     443             :      */
     444         338 :     InitLWLockAccess();
     445         338 :     InitDeadLockChecking();
     446         338 : }
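
on_shmem_exit() registers ProcKill to run at backend exit; callbacks fire in
reverse order of registration, so state set up later is torn down first. A
toy sketch of such a LIFO callback stack (hypothetical, and far simpler than
the real machinery in ipc.c):

    #define MAX_EXIT_CALLBACKS 20

    typedef void (*exit_cb) (int code, void *arg);

    static struct
    {
        exit_cb     func;
        void       *arg;
    }           exit_stack[MAX_EXIT_CALLBACKS];
    static int  exit_depth = 0;

    static void
    register_exit_cb(exit_cb func, void *arg)
    {
        if (exit_depth < MAX_EXIT_CALLBACKS)
        {
            exit_stack[exit_depth].func = func;
            exit_stack[exit_depth].arg = arg;
            exit_depth++;
        }
    }

    /* run callbacks newest-first at process exit */
    static void
    run_exit_cbs(int code)
    {
        while (exit_depth > 0)
        {
            exit_depth--;
            exit_stack[exit_depth].func(code, exit_stack[exit_depth].arg);
        }
    }
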
     447             : 
     448             : /*
     449             :  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
     450             :  *
     451             :  * This is separate from InitProcess because we can't acquire LWLocks until
     452             :  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
     453             :  * work until after we've done CreateSharedMemoryAndSemaphores.
     454             :  */
     455             : void
     456         338 : InitProcessPhase2(void)
     457             : {
     458         338 :     Assert(MyProc != NULL);
     459             : 
     460             :     /*
     461             :      * Add our PGPROC to the PGPROC array in shared memory.
     462             :      */
     463         338 :     ProcArrayAdd(MyProc);
     464             : 
     465             :     /*
     466             :      * Arrange to clean that up at backend exit.
     467             :      */
     468         338 :     on_shmem_exit(RemoveProcFromArray, 0);
     469         338 : }
     470             : 
     471             : /*
     472             :  * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
     473             :  *
     474             :  * This is called by bgwriter and similar processes so that they will have a
     475             :  * MyProc value that's real enough to let them wait for LWLocks.  The PGPROC
     476             :  * and sema that are assigned are one of the extra ones created during
     477             :  * InitProcGlobal.
     478             :  *
     479             :  * Auxiliary processes are presently not expected to wait for real (lockmgr)
     480             :  * locks, so we need not set up the deadlock checker.  They are never added
     481             :  * to the ProcArray or the sinval messaging mechanism, either.  They also
     482             :  * don't get a VXID assigned, since this is only useful when we actually
     483             :  * hold lockmgr locks.
     484             :  *
     485             :  * Startup process however uses locks but never waits for them in the
     486             :  * The startup process, however, does use locks, but never waits for them in
     487             :  * the normal backend sense.  It also takes part in sinval messaging as a
     488             :  * sendOnly process, so it never reads messages from the sinval queue.  The
     489             :  * startup process therefore does have a VXID and does show up in pg_locks.
     490             : void
     491           4 : InitAuxiliaryProcess(void)
     492             : {
     493             :     PGPROC     *auxproc;
     494             :     int         proctype;
     495             : 
     496             :     /*
     497             :      * ProcGlobal should be set up already (if we are a backend, we inherit
     498             :      * this by fork() or EXEC_BACKEND mechanism from the postmaster).
     499             :      */
     500           4 :     if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
     501           0 :         elog(PANIC, "proc header uninitialized");
     502             : 
     503           4 :     if (MyProc != NULL)
     504           0 :         elog(ERROR, "you already exist");
     505             : 
     506             :     /*
     507             :      * We use the ProcStructLock to protect assignment and releasing of
     508             :      * AuxiliaryProcs entries.
     509             :      *
     510             :      * While we are holding the ProcStructLock, also copy the current shared
     511             :      * estimate of spins_per_delay to local storage.
     512             :      */
     513           4 :     SpinLockAcquire(ProcStructLock);
     514             : 
     515           4 :     set_spins_per_delay(ProcGlobal->spins_per_delay);
     516             : 
     517             :     /*
     518             :      * Find a free auxproc ... *big* trouble if there isn't one ...
     519             :      */
     520           7 :     for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
     521             :     {
     522           7 :         auxproc = &AuxiliaryProcs[proctype];
     523           7 :         if (auxproc->pid == 0)
     524           4 :             break;
     525             :     }
     526           4 :     if (proctype >= NUM_AUXILIARY_PROCS)
     527             :     {
     528           0 :         SpinLockRelease(ProcStructLock);
     529           0 :         elog(FATAL, "all AuxiliaryProcs are in use");
     530             :     }
     531             : 
     532             :     /* Mark auxiliary proc as in use by me */
     533             :     /* use volatile pointer to prevent code rearrangement */
     534           4 :     ((volatile PGPROC *) auxproc)->pid = MyProcPid;
     535             : 
     536           4 :     MyProc = auxproc;
     537           4 :     MyPgXact = &ProcGlobal->allPgXact[auxproc->pgprocno];
     538             : 
     539           4 :     SpinLockRelease(ProcStructLock);
     540             : 
     541             :     /*
     542             :      * Initialize all fields of MyProc, except for those previously
     543             :      * initialized by InitProcGlobal.
     544             :      */
     545           4 :     SHMQueueElemInit(&(MyProc->links));
     546           4 :     MyProc->waitStatus = STATUS_OK;
     547           4 :     MyProc->lxid = InvalidLocalTransactionId;
     548           4 :     MyProc->fpVXIDLock = false;
     549           4 :     MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
     550           4 :     MyPgXact->xid = InvalidTransactionId;
     551           4 :     MyPgXact->xmin = InvalidTransactionId;
     552           4 :     MyProc->backendId = InvalidBackendId;
     553           4 :     MyProc->databaseId = InvalidOid;
     554           4 :     MyProc->roleId = InvalidOid;
     555           4 :     MyProc->isBackgroundWorker = IsBackgroundWorker;
     556           4 :     MyPgXact->delayChkpt = false;
     557           4 :     MyPgXact->vacuumFlags = 0;
     558           4 :     MyProc->lwWaiting = false;
     559           4 :     MyProc->lwWaitMode = 0;
     560           4 :     MyProc->waitLock = NULL;
     561           4 :     MyProc->waitProcLock = NULL;
     562             : #ifdef USE_ASSERT_CHECKING
     563             :     {
     564             :         int         i;
     565             : 
     566             :         /* Last process should have released all locks. */
     567          68 :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
     568          64 :             Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
     569             :     }
     570             : #endif
     571             : 
     572             :     /*
     573             :      * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
     574             :      * on it.  That allows us to repoint the process latch, which so far
     575             :      * points to the process-local one, to the shared one.
     576             :      */
     577           4 :     OwnLatch(&MyProc->procLatch);
     578           4 :     SwitchToSharedLatch();
     579             : 
     580             :     /* Check that group locking fields are in a proper initial state. */
     581           4 :     Assert(MyProc->lockGroupLeader == NULL);
     582           4 :     Assert(dlist_is_empty(&MyProc->lockGroupMembers));
     583             : 
     584             :     /*
     585             :      * We might be reusing a semaphore that belonged to a failed process. So
     586             :      * be careful and reinitialize its value here.  (This is not strictly
     587             :      * necessary anymore, but seems like a good idea for cleanliness.)
     588             :      */
     589           4 :     PGSemaphoreReset(MyProc->sem);
     590             : 
     591             :     /*
     592             :      * Arrange to clean up at process exit.
     593             :      */
     594           4 :     on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
     595           4 : }
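
Claiming an auxiliary slot is a simple linear scan for pid == 0 under
ProcStructLock, which is fine because NUM_AUXILIARY_PROCS is a small
compile-time constant. A hypothetical distilled form of the claim loop:

    /* scan a fixed-size table for an unused entry; caller holds the lock */
    static PGPROC *
    claim_aux_slot(PGPROC *table, int nslots, int mypid)
    {
        int         i;

        for (i = 0; i < nslots; i++)
        {
            if (table[i].pid == 0)
            {
                /* volatile store keeps the claim inside the locked section */
                ((volatile PGPROC *) &table[i])->pid = mypid;
                return &table[i];
            }
        }
        return NULL;            /* caller must treat this as fatal */
    }
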
     596             : 
     597             : /*
     598             :  * Record the PID and PGPROC structures for the Startup process, for use in
     599             :  * ProcSendSignal().  See comments there for further explanation.
     600             :  */
     601             : void
     602           0 : PublishStartupProcessInformation(void)
     603             : {
     604           0 :     SpinLockAcquire(ProcStructLock);
     605             : 
     606           0 :     ProcGlobal->startupProc = MyProc;
     607           0 :     ProcGlobal->startupProcPid = MyProcPid;
     608             : 
     609           0 :     SpinLockRelease(ProcStructLock);
     610           0 : }
     611             : 
     612             : /*
     613             :  * Used from bufmgr to share the value of the buffer that Startup waits on,
     614             :  * or to reset the value to "not waiting" (-1).  This allows processing
     615             :  * of recovery conflicts for buffer pins.  The set is made before backends
     616             :  * look at this value, so locking is not required, especially since the
     617             :  * set is an atomic integer set operation.
     618             :  */
     619             : void
     620           0 : SetStartupBufferPinWaitBufId(int bufid)
     621             : {
     622             :     /* use volatile pointer to prevent code rearrangement */
     623           0 :     volatile PROC_HDR *procglobal = ProcGlobal;
     624             : 
     625           0 :     procglobal->startupBufferPinWaitBufId = bufid;
     626           0 : }
     627             : 
     628             : /*
     629             :  * Used by backends when they receive a request to check for buffer pin waits.
     630             :  */
     631             : int
     632           0 : GetStartupBufferPinWaitBufId(void)
     633             : {
     634             :     /* use volatile pointer to prevent code rearrangement */
     635           0 :     volatile PROC_HDR *procglobal = ProcGlobal;
     636             : 
     637           0 :     return procglobal->startupBufferPinWaitBufId;
     638             : }
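
Both accessors depend on int stores and loads being atomic on all supported
platforms, with the volatile qualifier keeping the compiler from caching or
reordering the access. In stand-alone new code, the same contract could be
written with C11 atomics (a hypothetical equivalent, not what proc.c uses):

    #include <stdatomic.h>

    static atomic_int startup_pin_wait_bufid = -1;

    /* publish the buffer id Startup is waiting on, or -1 for "not waiting" */
    void
    set_wait_bufid(int bufid)
    {
        atomic_store(&startup_pin_wait_bufid, bufid);
    }

    int
    get_wait_bufid(void)
    {
        return atomic_load(&startup_pin_wait_bufid);
    }
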
     639             : 
     640             : /*
     641             :  * Check whether there are at least N free PGPROC objects.
     642             :  *
     643             :  * Note: this is designed on the assumption that N will generally be small.
     644             :  */
     645             : bool
     646           0 : HaveNFreeProcs(int n)
     647             : {
     648             :     PGPROC     *proc;
     649             : 
     650           0 :     SpinLockAcquire(ProcStructLock);
     651             : 
     652           0 :     proc = ProcGlobal->freeProcs;
     653             : 
     654           0 :     while (n > 0 && proc != NULL)
     655             :     {
     656           0 :         proc = (PGPROC *) proc->links.next;
     657           0 :         n--;
     658             :     }
     659             : 
     660           0 :     SpinLockRelease(ProcStructLock);
     661             : 
     662           0 :     return (n <= 0);
     663             : }
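
A typical caller uses this at backend startup to hold back some connection
slots for superusers; roughly (a hedged paraphrase of the check in
postinit.c, whose exact conditions vary by version):

    if (!am_superuser &&
        ReservedBackends > 0 &&
        !HaveNFreeProcs(ReservedBackends))
        ereport(FATAL,
                (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
                 errmsg("remaining connection slots are reserved for "
                        "superuser connections")));
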
     664             : 
     665             : /*
     666             :  * Check if the current process is awaiting a lock.
     667             :  */
     668             : bool
     669           0 : IsWaitingForLock(void)
     670             : {
     671           0 :     if (lockAwaited == NULL)
     672           0 :         return false;
     673             : 
     674           0 :     return true;
     675             : }
     676             : 
     677             : /*
     678             :  * Cancel any pending wait for lock, when aborting a transaction, and revert
     679             :  * any strong lock count acquisition for a lock being acquired.
     680             :  *
     681             :  * (Normally, this would only happen if we accept a cancel/die
     682             :  * interrupt while waiting; but an ereport(ERROR) before or during the lock
     683             :  * wait is within the realm of possibility, too.)
     684             :  */
     685             : void
     686       29771 : LockErrorCleanup(void)
     687             : {
     688             :     LWLock     *partitionLock;
     689             :     DisableTimeoutParams timeouts[2];
     690             : 
     691       29771 :     HOLD_INTERRUPTS();
     692             : 
     693       29771 :     AbortStrongLockAcquire();
     694             : 
     695             :     /* Nothing to do if we weren't waiting for a lock */
     696       29771 :     if (lockAwaited == NULL)
     697             :     {
     698       29771 :         RESUME_INTERRUPTS();
     699       59542 :         return;
     700             :     }
     701             : 
     702             :     /*
     703             :      * Turn off the deadlock and lock timeout timers, if they are still
     704             :      * running (see ProcSleep).  Note we must preserve the LOCK_TIMEOUT
     705             :      * indicator flag, since this function is executed before
     706             :      * ProcessInterrupts when responding to SIGINT; else we'd lose the
     707             :      * knowledge that the SIGINT came from a lock timeout and not an external
     708             :      * source.
     709             :      */
     710           0 :     timeouts[0].id = DEADLOCK_TIMEOUT;
     711           0 :     timeouts[0].keep_indicator = false;
     712           0 :     timeouts[1].id = LOCK_TIMEOUT;
     713           0 :     timeouts[1].keep_indicator = true;
     714           0 :     disable_timeouts(timeouts, 2);
     715             : 
     716             :     /* Unlink myself from the wait queue, if on it (might not be anymore!) */
     717           0 :     partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
     718           0 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
     719             : 
     720           0 :     if (MyProc->links.next != NULL)
     721             :     {
     722             :         /* We could not have been granted the lock yet */
     723           0 :         RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
     724             :     }
     725             :     else
     726             :     {
     727             :         /*
     728             :          * Somebody kicked us off the lock queue already.  Perhaps they
     729             :          * granted us the lock, or perhaps they detected a deadlock. If they
     730             :          * did grant us the lock, we'd better remember it in our local lock
     731             :          * table.
     732             :          */
     733           0 :         if (MyProc->waitStatus == STATUS_OK)
     734           0 :             GrantAwaitedLock();
     735             :     }
     736             : 
     737           0 :     lockAwaited = NULL;
     738             : 
     739           0 :     LWLockRelease(partitionLock);
     740             : 
     741           0 :     RESUME_INTERRUPTS();
     742             : }
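
The two timers disarmed here are the ones ProcSleep() arms just before
blocking; the arm and disarm sites must stay paired, or a stale timer could
fire after the wait has ended. A simplified sketch of the arming side (the
real ProcSleep() batches both into one enable_timeouts() call when
lock_timeout is active):

    /* after DeadlockTimeout ms of waiting, run the deadlock check */
    enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);

    /* optionally cap the total time we are willing to wait for the lock */
    if (LockTimeout > 0)
        enable_timeout_after(LOCK_TIMEOUT, LockTimeout);
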
     743             : 
     744             : 
     745             : /*
     746             :  * ProcReleaseLocks() -- release locks associated with current transaction
     747             :  *          at main transaction commit or abort
     748             :  *
     749             :  * At main transaction commit, we release standard locks except session locks.
     750             :  * At main transaction abort, we release all locks including session locks.
     751             :  *
     752             :  * Advisory locks are released only if they are transaction-level;
     753             :  * session-level holds remain, whether this is a commit or not.
     754             :  *
     755             :  * At subtransaction commit, we don't release any locks (so this func is not
     756             :  * needed at all); we will defer the releasing to the parent transaction.
     757             :  * At subtransaction abort, we release all locks held by the subtransaction;
     758             :  * this is implemented by retail releasing of the locks under control of
     759             :  * the ResourceOwner mechanism.
     760             :  */
     761             : void
     762       26167 : ProcReleaseLocks(bool isCommit)
     763             : {
     764       26167 :     if (!MyProc)
     765       26167 :         return;
     766             :     /* If waiting, get off wait queue (should only be needed after error) */
     767       26167 :     LockErrorCleanup();
     768             :     /* Release standard locks, including session-level if aborting */
     769       26167 :     LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
     770             :     /* Release transaction-level advisory locks */
     771       26167 :     LockReleaseAll(USER_LOCKMETHOD, false);
     772             : }
     773             : 
     774             : 
     775             : /*
     776             :  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
     777             :  */
     778             : static void
     779         338 : RemoveProcFromArray(int code, Datum arg)
     780             : {
     781         338 :     Assert(MyProc != NULL);
     782         338 :     ProcArrayRemove(MyProc, InvalidTransactionId);
     783         338 : }
     784             : 
     785             : /*
     786             :  * ProcKill() -- Destroy the per-proc data structure for
     787             :  *      this process. Release any of its held LW locks.
     788             :  */
     789             : static void
     790         338 : ProcKill(int code, Datum arg)
     791             : {
     792             :     PGPROC     *proc;
     793             :     PGPROC     *volatile *procgloballist;
     794             : 
     795         338 :     Assert(MyProc != NULL);
     796             : 
     797             :     /* Make sure we're out of the sync rep lists */
     798         338 :     SyncRepCleanupAtProcExit();
     799             : 
     800             : #ifdef USE_ASSERT_CHECKING
     801             :     {
     802             :         int         i;
     803             : 
     804             :         /* Last process should have released all locks. */
     805        5746 :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
     806        5408 :             Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
     807             :     }
     808             : #endif
     809             : 
     810             :     /*
     811             :      * Release any LW locks I am holding.  There really shouldn't be any, but
     812             :      * it's cheap to check again before we cut the knees off the LWLock
     813             :      * facility by releasing our PGPROC ...
     814             :      */
     815         338 :     LWLockReleaseAll();
     816             : 
     817             :     /* Cancel any pending condition variable sleep, too */
     818         338 :     ConditionVariableCancelSleep();
     819             : 
     820             :     /* Make sure active replication slots are released */
     821         338 :     if (MyReplicationSlot != NULL)
     822           0 :         ReplicationSlotRelease();
     823             : 
     824             :     /* Also cleanup all the temporary slots. */
     825         338 :     ReplicationSlotCleanup();
     826             : 
     827             :     /*
     828             :      * Detach from any lock group of which we are a member.  If the leader
     829             :      * exits before all other group members, its PGPROC will remain allocated
     830             :      * until the last group process exits; that process must return the
     831             :      * leader's PGPROC to the appropriate list.
     832             :      */
     833         338 :     if (MyProc->lockGroupLeader != NULL)
     834             :     {
     835         116 :         PGPROC     *leader = MyProc->lockGroupLeader;
     836         116 :         LWLock     *leader_lwlock = LockHashPartitionLockByProc(leader);
     837             : 
     838         116 :         LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
     839         116 :         Assert(!dlist_is_empty(&leader->lockGroupMembers));
     840         116 :         dlist_delete(&MyProc->lockGroupLink);
     841         116 :         if (dlist_is_empty(&leader->lockGroupMembers))
     842             :         {
     843           1 :             leader->lockGroupLeader = NULL;
     844           1 :             if (leader != MyProc)
     845             :             {
     846           0 :                 procgloballist = leader->procgloballist;
     847             : 
     848             :                 /* Leader exited first; return its PGPROC. */
     849           0 :                 SpinLockAcquire(ProcStructLock);
     850           0 :                 leader->links.next = (SHM_QUEUE *) *procgloballist;
     851           0 :                 *procgloballist = leader;
     852           0 :                 SpinLockRelease(ProcStructLock);
     853             :             }
     854             :         }
     855         115 :         else if (leader != MyProc)
     856         115 :             MyProc->lockGroupLeader = NULL;
     857         116 :         LWLockRelease(leader_lwlock);
     858             :     }
     859             : 
     860             :     /*
     861             :      * Reset MyLatch to the process-local one.  This is so that signal
     862             :      * handlers et al can continue using the latch after the shared latch
     863             :      * isn't ours anymore. After that clear MyProc and disown the shared
     864             :      * latch.
     865             :      */
     866         338 :     SwitchBackToLocalLatch();
     867         338 :     proc = MyProc;
     868         338 :     MyProc = NULL;
     869         338 :     DisownLatch(&proc->procLatch);
     870             : 
     871         338 :     procgloballist = proc->procgloballist;
     872         338 :     SpinLockAcquire(ProcStructLock);
     873             : 
     874             :     /*
     875             :      * If we're still a member of a locking group, that means we're a leader
     876             :      * which has somehow exited before its children.  The last remaining child
     877             :      * will release our PGPROC.  Otherwise, release it now.
     878             :      */
     879         338 :     if (proc->lockGroupLeader == NULL)
     880             :     {
     881             :         /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
     882         338 :         Assert(dlist_is_empty(&proc->lockGroupMembers));
     883             : 
     884             :         /* Return PGPROC structure (and semaphore) to appropriate freelist */
     885         338 :         proc->links.next = (SHM_QUEUE *) *procgloballist;
     886         338 :         *procgloballist = proc;
     887             :     }
     888             : 
     889             :     /* Update shared estimate of spins_per_delay */
     890         338 :     ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
     891             : 
     892         338 :     SpinLockRelease(ProcStructLock);
     893             : 
     894             :     /*
     895             :      * This process is no longer present in shared memory in any meaningful
     896             :      * way, so tell the postmaster we've cleaned up acceptably well. (XXX
     897             :      * autovac launcher should be included here someday)
     898             :      */
     899         338 :     if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
     900         335 :         MarkPostmasterChildInactive();
     901             : 
     902             :     /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
     903         338 :     if (AutovacuumLauncherPid != 0)
     904           3 :         kill(AutovacuumLauncherPid, SIGUSR2);
     905         338 : }
     906             : 
     907             : /*
     908             :  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
     909             :  *      processes (bgwriter, etc).  The PGPROC and sema are not released, only
     910             :  *      marked as not-in-use.
     911             :  */
     912             : static void
     913           4 : AuxiliaryProcKill(int code, Datum arg)
     914             : {
     915           4 :     int         proctype = DatumGetInt32(arg);
     916             :     PGPROC     *auxproc PG_USED_FOR_ASSERTS_ONLY;
     917             :     PGPROC     *proc;
     918             : 
     919           4 :     Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
     920             : 
     921           4 :     auxproc = &AuxiliaryProcs[proctype];
     922             : 
     923           4 :     Assert(MyProc == auxproc);
     924             : 
     925             :     /* Release any LW locks I am holding (see notes above) */
     926           4 :     LWLockReleaseAll();
     927             : 
     928             :     /* Cancel any pending condition variable sleep, too */
     929           4 :     ConditionVariableCancelSleep();
     930             : 
     931             :     /*
     932             :      * Reset MyLatch to the process-local one.  This is so that signal
     933             :      * handlers et al can continue using the latch after the shared latch
     934             :      * isn't ours anymore. After that clear MyProc and disown the shared
     935             :      * latch.
     936             :      */
     937           4 :     SwitchBackToLocalLatch();
     938           4 :     proc = MyProc;
     939           4 :     MyProc = NULL;
     940           4 :     DisownLatch(&proc->procLatch);
     941             : 
     942           4 :     SpinLockAcquire(ProcStructLock);
     943             : 
     944             :     /* Mark auxiliary proc no longer in use */
     945           4 :     proc->pid = 0;
     946             : 
     947             :     /* Update shared estimate of spins_per_delay */
     948           4 :     ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
     949             : 
     950           4 :     SpinLockRelease(ProcStructLock);
     951           4 : }
     952             : 
     953             : /*
     954             :  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
     955             :  * given its PID
     956             :  *
     957             :  * Returns NULL if not found.
     958             :  */
     959             : PGPROC *
     960           0 : AuxiliaryPidGetProc(int pid)
     961             : {
     962           0 :     PGPROC     *result = NULL;
     963             :     int         index;
     964             : 
     965           0 :     if (pid == 0)               /* never match dummy PGPROCs */
     966           0 :         return NULL;
     967             : 
     968           0 :     for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
     969             :     {
     970           0 :         PGPROC     *proc = &AuxiliaryProcs[index];
     971             : 
     972           0 :         if (proc->pid == pid)
     973             :         {
     974           0 :             result = proc;
     975           0 :             break;
     976             :         }
     977             :     }
     978           0 :     return result;
     979             : }
     980             : 
     981             : /*
     982             :  * ProcQueue package: routines for putting processes to sleep
     983             :  *      and  waking them up
     984             :  */
     985             : 
     986             : /*
     987             :  * ProcQueueAlloc -- alloc/attach to a shared memory process queue
     988             :  *
     989             :  * Returns: a pointer to the queue
     990             :  * Side Effects: Initializes the queue if it wasn't there before
     991             :  */
     992             : #ifdef NOT_USED
     993             : PROC_QUEUE *
     994             : ProcQueueAlloc(const char *name)
     995             : {
     996             :     PROC_QUEUE *queue;
     997             :     bool        found;
     998             : 
     999             :     queue = (PROC_QUEUE *)
    1000             :         ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
    1001             : 
    1002             :     if (!found)
    1003             :         ProcQueueInit(queue);
    1004             : 
    1005             :     return queue;
    1006             : }
    1007             : #endif
    1008             : 
    1009             : /*
    1010             :  * ProcQueueInit -- initialize a shared memory process queue
    1011             :  */
    1012             : void
    1013       86353 : ProcQueueInit(PROC_QUEUE *queue)
    1014             : {
    1015       86353 :     SHMQueueInit(&(queue->links));
    1016       86353 :     queue->size = 0;
    1017       86353 : }
    1018             : 
    1019             : 
    1020             : /*
    1021             :  * ProcSleep -- put a process to sleep on the specified lock
    1022             :  *
    1023             :  * Caller must have set MyProc->heldLocks to reflect locks already held
    1024             :  * on the lockable object by this process (under all XIDs).
    1025             :  *
    1026             :  * The lock table's partition lock must be held at entry, and will be held
    1027             :  * at exit.
    1028             :  *
    1029             :  * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
    1030             :  *
     1031             :  * ASSUME: no one will fiddle with the queue until after
    1032             :  *      we release the partition lock.
    1033             :  *
     1034             :  * NOTES: The process queue is a priority queue for locking.
    1035             :  */
    1036             : int
    1037          19 : ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
    1038             : {
    1039          19 :     LOCKMODE    lockmode = locallock->tag.mode;
    1040          19 :     LOCK       *lock = locallock->lock;
    1041          19 :     PROCLOCK   *proclock = locallock->proclock;
    1042          19 :     uint32      hashcode = locallock->hashcode;
    1043          19 :     LWLock     *partitionLock = LockHashPartitionLock(hashcode);
    1044          19 :     PROC_QUEUE *waitQueue = &(lock->waitProcs);
    1045          19 :     LOCKMASK    myHeldLocks = MyProc->heldLocks;
    1046          19 :     bool        early_deadlock = false;
    1047          19 :     bool        allow_autovacuum_cancel = true;
    1048             :     int         myWaitStatus;
    1049             :     PGPROC     *proc;
    1050          19 :     PGPROC     *leader = MyProc->lockGroupLeader;
    1051             :     int         i;
    1052             : 
    1053             :     /*
    1054             :      * If group locking is in use, locks held by members of my locking group
    1055             :      * need to be included in myHeldLocks.
    1056             :      */
    1057          19 :     if (leader != NULL)
    1058             :     {
    1059           0 :         SHM_QUEUE  *procLocks = &(lock->procLocks);
    1060             :         PROCLOCK   *otherproclock;
    1061             : 
    1062           0 :         otherproclock = (PROCLOCK *)
    1063             :             SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
    1064           0 :         while (otherproclock != NULL)
    1065             :         {
    1066           0 :             if (otherproclock->groupLeader == leader)
    1067           0 :                 myHeldLocks |= otherproclock->holdMask;
    1068           0 :             otherproclock = (PROCLOCK *)
    1069           0 :                 SHMQueueNext(procLocks, &otherproclock->lockLink,
    1070             :                              offsetof(PROCLOCK, lockLink));
    1071             :         }
    1072             :     }
    1073             : 
    1074             :     /*
    1075             :      * Determine where to add myself in the wait queue.
    1076             :      *
    1077             :      * Normally I should go at the end of the queue.  However, if I already
    1078             :      * hold locks that conflict with the request of any previous waiter, put
    1079             :      * myself in the queue just in front of the first such waiter. This is not
    1080             :      * a necessary step, since deadlock detection would move me to before that
    1081             :      * waiter anyway; but it's relatively cheap to detect such a conflict
    1082             :      * immediately, and avoid delaying till deadlock timeout.
    1083             :      *
    1084             :      * Special case: if I find I should go in front of some waiter, check to
    1085             :      * see if I conflict with already-held locks or the requests before that
    1086             :      * waiter.  If not, then just grant myself the requested lock immediately.
    1087             :      * This is the same as the test for immediate grant in LockAcquire, except
    1088             :      * we are only considering the part of the wait queue before my insertion
    1089             :      * point.
    1090             :      */
    1091          19 :     if (myHeldLocks != 0)
    1092             :     {
    1093           1 :         LOCKMASK    aheadRequests = 0;
    1094             : 
    1095           1 :         proc = (PGPROC *) waitQueue->links.next;
    1096           1 :         for (i = 0; i < waitQueue->size; i++)
    1097             :         {
    1098             :             /*
    1099             :              * If we're part of the same locking group as this waiter, its
    1100             :              * locks neither conflict with ours nor contribute to
    1101             :              * aheadRequests.
    1102             :              */
    1103           1 :             if (leader != NULL && leader == proc->lockGroupLeader)
    1104             :             {
    1105           0 :                 proc = (PGPROC *) proc->links.next;
    1106           0 :                 continue;
    1107             :             }
    1108             :             /* Must he wait for me? */
    1109           1 :             if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
    1110             :             {
     1111             :                 /* Must I wait for him? */
    1112           1 :                 if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
    1113             :                 {
    1114             :                     /*
    1115             :                      * Yes, so we have a deadlock.  Easiest way to clean up
    1116             :                      * correctly is to call RemoveFromWaitQueue(), but we
    1117             :                      * can't do that until we are *on* the wait queue. So, set
    1118             :                      * a flag to check below, and break out of loop.  Also,
    1119             :                      * record deadlock info for later message.
    1120             :                      */
    1121           0 :                     RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
    1122           0 :                     early_deadlock = true;
    1123           0 :                     break;
    1124             :                 }
    1125             :                 /* I must go before this waiter.  Check special case. */
    1126           2 :                 if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
    1127           1 :                     LockCheckConflicts(lockMethodTable,
    1128             :                                        lockmode,
    1129             :                                        lock,
    1130             :                                        proclock) == STATUS_OK)
    1131             :                 {
    1132             :                     /* Skip the wait and just grant myself the lock. */
    1133           1 :                     GrantLock(lock, proclock, lockmode);
    1134           1 :                     GrantAwaitedLock();
    1135           1 :                     return STATUS_OK;
    1136             :                 }
    1137             :                 /* Break out of loop to put myself before him */
    1138           0 :                 break;
    1139             :             }
    1140             :             /* Nope, so advance to next waiter */
    1141           0 :             aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
    1142           0 :             proc = (PGPROC *) proc->links.next;
    1143             :         }
    1144             : 
    1145             :         /*
     1146             :          * If we fall out of the loop normally, proc points to waitQueue head, so
    1147             :          * we will insert at tail of queue as desired.
    1148             :          */
    1149             :     }
    1150             :     else
    1151             :     {
    1152             :         /* I hold no locks, so I can't push in front of anyone. */
    1153          18 :         proc = (PGPROC *) &(waitQueue->links);
    1154             :     }
    1155             : 
    1156             :     /*
    1157             :      * Insert self into queue, ahead of the given proc (or at tail of queue).
    1158             :      */
    1159          18 :     SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
    1160          18 :     waitQueue->size++;
    1161             : 
    1162          18 :     lock->waitMask |= LOCKBIT_ON(lockmode);
    1163             : 
    1164             :     /* Set up wait information in PGPROC object, too */
    1165          18 :     MyProc->waitLock = lock;
    1166          18 :     MyProc->waitProcLock = proclock;
    1167          18 :     MyProc->waitLockMode = lockmode;
    1168             : 
    1169          18 :     MyProc->waitStatus = STATUS_WAITING;
    1170             : 
    1171             :     /*
    1172             :      * If we detected deadlock, give up without waiting.  This must agree with
    1173             :      * CheckDeadLock's recovery code, except that we shouldn't release the
    1174             :      * semaphore since we haven't tried to lock it yet.
    1175             :      */
    1176          18 :     if (early_deadlock)
    1177             :     {
    1178           0 :         RemoveFromWaitQueue(MyProc, hashcode);
    1179           0 :         return STATUS_ERROR;
    1180             :     }
    1181             : 
    1182             :     /* mark that we are waiting for a lock */
    1183          18 :     lockAwaited = locallock;
    1184             : 
    1185             :     /*
    1186             :      * Release the lock table's partition lock.
    1187             :      *
    1188             :      * NOTE: this may also cause us to exit critical-section state, possibly
    1189             :      * allowing a cancel/die interrupt to be accepted. This is OK because we
    1190             :      * have recorded the fact that we are waiting for a lock, and so
    1191             :      * LockErrorCleanup will clean up if cancel/die happens.
    1192             :      */
    1193          18 :     LWLockRelease(partitionLock);
    1194             : 
    1195             :     /*
    1196             :      * Also, now that we will successfully clean up after an ereport, it's
    1197             :      * safe to check to see if there's a buffer pin deadlock against the
    1198             :      * Startup process.  Of course, that's only necessary if we're doing Hot
    1199             :      * Standby and are not the Startup process ourselves.
    1200             :      */
    1201          18 :     if (RecoveryInProgress() && !InRecovery)
    1202           0 :         CheckRecoveryConflictDeadlock();
    1203             : 
    1204             :     /* Reset deadlock_state before enabling the timeout handler */
    1205          18 :     deadlock_state = DS_NOT_YET_CHECKED;
    1206          18 :     got_deadlock_timeout = false;
    1207             : 
    1208             :     /*
     1209             :      * Set a timer so we can wake up after a while and check for a deadlock. If a
    1210             :      * deadlock is detected, the handler releases the process's semaphore and
    1211             :      * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
    1212             :      * must report failure rather than success.
    1213             :      *
    1214             :      * By delaying the check until we've waited for a bit, we can avoid
    1215             :      * running the rather expensive deadlock-check code in most cases.
    1216             :      *
    1217             :      * If LockTimeout is set, also enable the timeout for that.  We can save a
    1218             :      * few cycles by enabling both timeout sources in one call.
    1219             :      *
     1220             :      * If InHotStandby, we set up the lock-wait timers slightly later, for
     1221             :      * clarity with other code.
    1222             :      */
    1223          18 :     if (!InHotStandby)
    1224             :     {
    1225          18 :         if (LockTimeout > 0)
    1226             :         {
    1227             :             EnableTimeoutParams timeouts[2];
    1228             : 
    1229           0 :             timeouts[0].id = DEADLOCK_TIMEOUT;
    1230           0 :             timeouts[0].type = TMPARAM_AFTER;
    1231           0 :             timeouts[0].delay_ms = DeadlockTimeout;
    1232           0 :             timeouts[1].id = LOCK_TIMEOUT;
    1233           0 :             timeouts[1].type = TMPARAM_AFTER;
    1234           0 :             timeouts[1].delay_ms = LockTimeout;
    1235           0 :             enable_timeouts(timeouts, 2);
    1236             :         }
    1237             :         else
    1238          18 :             enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
    1239             :     }
    1240             : 
    1241             :     /*
    1242             :      * If somebody wakes us between LWLockRelease and WaitLatch, the latch
    1243             :      * will not wait. But a set latch does not necessarily mean that the lock
     1244             :      * is free now, as there are many sources of latch sets other than
    1245             :      * somebody releasing the lock.
    1246             :      *
    1247             :      * We process interrupts whenever the latch has been set, so cancel/die
    1248             :      * interrupts are processed quickly. This means we must not mind losing
    1249             :      * control to a cancel/die interrupt here.  We don't, because we have no
    1250             :      * shared-state-change work to do after being granted the lock (the
    1251             :      * grantor did it all).  We do have to worry about canceling the deadlock
    1252             :      * timeout and updating the locallock table, but if we lose control to an
    1253             :      * error, LockErrorCleanup will fix that up.
    1254             :      */
    1255             :     do
    1256             :     {
    1257          19 :         if (InHotStandby)
    1258             :         {
    1259             :             /* Set a timer and wait for that or for the Lock to be granted */
    1260           0 :             ResolveRecoveryConflictWithLock(locallock->tag.lock);
    1261             :         }
    1262             :         else
    1263             :         {
    1264          19 :             WaitLatch(MyLatch, WL_LATCH_SET, 0,
    1265          19 :                       PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
    1266          19 :             ResetLatch(MyLatch);
    1267             :             /* check for deadlocks first, as that's probably log-worthy */
    1268          19 :             if (got_deadlock_timeout)
    1269             :             {
    1270           0 :                 CheckDeadLock();
    1271           0 :                 got_deadlock_timeout = false;
    1272             :             }
    1273          19 :             CHECK_FOR_INTERRUPTS();
    1274             :         }
    1275             : 
    1276             :         /*
    1277             :          * waitStatus could change from STATUS_WAITING to something else
    1278             :          * asynchronously.  Read it just once per loop to prevent surprising
    1279             :          * behavior (such as missing log messages).
    1280             :          */
    1281          19 :         myWaitStatus = *((volatile int *) &MyProc->waitStatus);
    1282             : 
    1283             :         /*
    1284             :          * If we are not deadlocked, but are waiting on an autovacuum-induced
    1285             :          * task, send a signal to interrupt it.
    1286             :          */
    1287          19 :         if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
    1288             :         {
    1289           0 :             PGPROC     *autovac = GetBlockingAutoVacuumPgproc();
    1290           0 :             PGXACT     *autovac_pgxact = &ProcGlobal->allPgXact[autovac->pgprocno];
    1291             : 
    1292           0 :             LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
    1293             : 
    1294             :             /*
    1295             :              * Only do it if the worker is not working to protect against Xid
    1296             :              * wraparound.
    1297             :              */
    1298           0 :             if ((autovac_pgxact->vacuumFlags & PROC_IS_AUTOVACUUM) &&
    1299           0 :                 !(autovac_pgxact->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND))
    1300           0 :             {
    1301           0 :                 int         pid = autovac->pid;
    1302             :                 StringInfoData locktagbuf;
    1303             :                 StringInfoData logbuf;  /* errdetail for server log */
    1304             : 
    1305           0 :                 initStringInfo(&locktagbuf);
    1306           0 :                 initStringInfo(&logbuf);
    1307           0 :                 DescribeLockTag(&locktagbuf, &lock->tag);
    1308           0 :                 appendStringInfo(&logbuf,
    1309             :                                  _("Process %d waits for %s on %s."),
    1310             :                                  MyProcPid,
    1311           0 :                                  GetLockmodeName(lock->tag.locktag_lockmethodid,
    1312             :                                                  lockmode),
    1313             :                                  locktagbuf.data);
    1314             : 
    1315             :                 /* release lock as quickly as possible */
    1316           0 :                 LWLockRelease(ProcArrayLock);
    1317             : 
     1318             :                 /* ask the autovacuum worker to cancel its current task */
    1319           0 :                 ereport(DEBUG1,
    1320             :                         (errmsg("sending cancel to blocking autovacuum PID %d",
    1321             :                                 pid),
    1322             :                          errdetail_log("%s", logbuf.data)));
    1323             : 
    1324           0 :                 if (kill(pid, SIGINT) < 0)
    1325             :                 {
    1326             :                     /*
    1327             :                      * There's a race condition here: once we release the
    1328             :                      * ProcArrayLock, it's possible for the autovac worker to
    1329             :                      * close up shop and exit before we can do the kill().
    1330             :                      * Therefore, we do not whinge about no-such-process.
    1331             :                      * Other errors such as EPERM could conceivably happen if
    1332             :                      * the kernel recycles the PID fast enough, but such cases
    1333             :                      * seem improbable enough that it's probably best to issue
    1334             :                      * a warning if we see some other errno.
    1335             :                      */
    1336           0 :                     if (errno != ESRCH)
    1337           0 :                         ereport(WARNING,
    1338             :                                 (errmsg("could not send signal to process %d: %m",
    1339             :                                         pid)));
    1340             :                 }
    1341             : 
    1342           0 :                 pfree(logbuf.data);
    1343           0 :                 pfree(locktagbuf.data);
    1344             :             }
    1345             :             else
    1346           0 :                 LWLockRelease(ProcArrayLock);
    1347             : 
    1348             :             /* prevent signal from being resent more than once */
     1349             :             /* prevent the signal from being sent more than once */
    1350             :         }
    1351             : 
    1352             :         /*
    1353             :          * If awoken after the deadlock check interrupt has run, and
    1354             :          * log_lock_waits is on, then report about the wait.
    1355             :          */
    1356          19 :         if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
    1357             :         {
    1358             :             StringInfoData buf,
    1359             :                         lock_waiters_sbuf,
    1360             :                         lock_holders_sbuf;
    1361             :             const char *modename;
    1362             :             long        secs;
    1363             :             int         usecs;
    1364             :             long        msecs;
    1365             :             SHM_QUEUE  *procLocks;
    1366             :             PROCLOCK   *proclock;
    1367           0 :             bool        first_holder = true,
    1368           0 :                         first_waiter = true;
    1369           0 :             int         lockHoldersNum = 0;
    1370             : 
    1371           0 :             initStringInfo(&buf);
    1372           0 :             initStringInfo(&lock_waiters_sbuf);
    1373           0 :             initStringInfo(&lock_holders_sbuf);
    1374             : 
    1375           0 :             DescribeLockTag(&buf, &locallock->tag.lock);
    1376           0 :             modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
    1377             :                                        lockmode);
    1378           0 :             TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
    1379             :                                 GetCurrentTimestamp(),
    1380             :                                 &secs, &usecs);
    1381           0 :             msecs = secs * 1000 + usecs / 1000;
    1382           0 :             usecs = usecs % 1000;
    1383             : 
    1384             :             /*
     1385             :              * We loop over the lock's procLocks to gather a list of all
     1386             :              * holders and waiters, so we can provide more detailed
     1387             :              * information for lock debugging purposes.
    1388             :              *
    1389             :              * lock->procLocks contains all processes which hold or wait for
    1390             :              * this lock.
    1391             :              */
    1392             : 
    1393           0 :             LWLockAcquire(partitionLock, LW_SHARED);
    1394             : 
    1395           0 :             procLocks = &(lock->procLocks);
    1396           0 :             proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
    1397             :                                                  offsetof(PROCLOCK, lockLink));
    1398             : 
    1399           0 :             while (proclock)
    1400             :             {
    1401             :                 /*
     1402             :                  * We are a waiter if myProc->waitProcLock == proclock; we are
     1403             :                  * a holder if it is NULL or points to some other proclock.
    1404             :                  */
    1405           0 :                 if (proclock->tag.myProc->waitProcLock == proclock)
    1406             :                 {
    1407           0 :                     if (first_waiter)
    1408             :                     {
    1409           0 :                         appendStringInfo(&lock_waiters_sbuf, "%d",
    1410           0 :                                          proclock->tag.myProc->pid);
    1411           0 :                         first_waiter = false;
    1412             :                     }
    1413             :                     else
    1414           0 :                         appendStringInfo(&lock_waiters_sbuf, ", %d",
    1415           0 :                                          proclock->tag.myProc->pid);
    1416             :                 }
    1417             :                 else
    1418             :                 {
    1419           0 :                     if (first_holder)
    1420             :                     {
    1421           0 :                         appendStringInfo(&lock_holders_sbuf, "%d",
    1422           0 :                                          proclock->tag.myProc->pid);
    1423           0 :                         first_holder = false;
    1424             :                     }
    1425             :                     else
    1426           0 :                         appendStringInfo(&lock_holders_sbuf, ", %d",
    1427           0 :                                          proclock->tag.myProc->pid);
    1428             : 
    1429           0 :                     lockHoldersNum++;
    1430             :                 }
    1431             : 
    1432           0 :                 proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
    1433             :                                                      offsetof(PROCLOCK, lockLink));
    1434             :             }
    1435             : 
    1436           0 :             LWLockRelease(partitionLock);
    1437             : 
    1438           0 :             if (deadlock_state == DS_SOFT_DEADLOCK)
    1439           0 :                 ereport(LOG,
    1440             :                         (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
    1441             :                                 MyProcPid, modename, buf.data, msecs, usecs),
    1442             :                          (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
    1443             :                                                "Processes holding the lock: %s. Wait queue: %s.",
    1444             :                                                lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
    1445           0 :             else if (deadlock_state == DS_HARD_DEADLOCK)
    1446             :             {
    1447             :                 /*
    1448             :                  * This message is a bit redundant with the error that will be
    1449             :                  * reported subsequently, but in some cases the error report
    1450             :                  * might not make it to the log (eg, if it's caught by an
    1451             :                  * exception handler), and we want to ensure all long-wait
    1452             :                  * events get logged.
    1453             :                  */
    1454           0 :                 ereport(LOG,
    1455             :                         (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
    1456             :                                 MyProcPid, modename, buf.data, msecs, usecs),
    1457             :                          (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
    1458             :                                                "Processes holding the lock: %s. Wait queue: %s.",
    1459             :                                                lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
    1460             :             }
    1461             : 
    1462           0 :             if (myWaitStatus == STATUS_WAITING)
    1463           0 :                 ereport(LOG,
    1464             :                         (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
    1465             :                                 MyProcPid, modename, buf.data, msecs, usecs),
    1466             :                          (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
    1467             :                                                "Processes holding the lock: %s. Wait queue: %s.",
    1468             :                                                lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
    1469           0 :             else if (myWaitStatus == STATUS_OK)
    1470           0 :                 ereport(LOG,
    1471             :                         (errmsg("process %d acquired %s on %s after %ld.%03d ms",
    1472             :                                 MyProcPid, modename, buf.data, msecs, usecs)));
    1473             :             else
    1474             :             {
    1475           0 :                 Assert(myWaitStatus == STATUS_ERROR);
    1476             : 
    1477             :                 /*
    1478             :                  * Currently, the deadlock checker always kicks its own
    1479             :                  * process, which means that we'll only see STATUS_ERROR when
    1480             :                  * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
    1481             :                  * print redundant messages.  But for completeness and
    1482             :                  * future-proofing, print a message if it looks like someone
    1483             :                  * else kicked us off the lock.
    1484             :                  */
    1485           0 :                 if (deadlock_state != DS_HARD_DEADLOCK)
    1486           0 :                     ereport(LOG,
    1487             :                             (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
    1488             :                                     MyProcPid, modename, buf.data, msecs, usecs),
    1489             :                              (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
    1490             :                                                    "Processes holding the lock: %s. Wait queue: %s.",
    1491             :                                                    lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
    1492             :             }
    1493             : 
    1494             :             /*
    1495             :              * At this point we might still need to wait for the lock. Reset
    1496             :              * state so we don't print the above messages again.
    1497             :              */
    1498           0 :             deadlock_state = DS_NO_DEADLOCK;
    1499             : 
    1500           0 :             pfree(buf.data);
    1501           0 :             pfree(lock_holders_sbuf.data);
    1502           0 :             pfree(lock_waiters_sbuf.data);
    1503             :         }
    1504          19 :     } while (myWaitStatus == STATUS_WAITING);
    1505             : 
    1506             :     /*
    1507             :      * Disable the timers, if they are still running.  As in LockErrorCleanup,
    1508             :      * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
    1509             :      * already caused QueryCancelPending to become set, we want the cancel to
    1510             :      * be reported as a lock timeout, not a user cancel.
    1511             :      */
    1512          18 :     if (!InHotStandby)
    1513             :     {
    1514          18 :         if (LockTimeout > 0)
    1515             :         {
    1516             :             DisableTimeoutParams timeouts[2];
    1517             : 
    1518           0 :             timeouts[0].id = DEADLOCK_TIMEOUT;
    1519           0 :             timeouts[0].keep_indicator = false;
    1520           0 :             timeouts[1].id = LOCK_TIMEOUT;
    1521           0 :             timeouts[1].keep_indicator = true;
    1522           0 :             disable_timeouts(timeouts, 2);
    1523             :         }
    1524             :         else
    1525          18 :             disable_timeout(DEADLOCK_TIMEOUT, false);
    1526             :     }
    1527             : 
    1528             :     /*
    1529             :      * Re-acquire the lock table's partition lock.  We have to do this to hold
    1530             :      * off cancel/die interrupts before we can mess with lockAwaited (else we
    1531             :      * might have a missed or duplicated locallock update).
    1532             :      */
    1533          18 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    1534             : 
    1535             :     /*
    1536             :      * We no longer want LockErrorCleanup to do anything.
    1537             :      */
    1538          18 :     lockAwaited = NULL;
    1539             : 
    1540             :     /*
    1541             :      * If we got the lock, be sure to remember it in the locallock table.
    1542             :      */
    1543          18 :     if (MyProc->waitStatus == STATUS_OK)
    1544          18 :         GrantAwaitedLock();
    1545             : 
    1546             :     /*
    1547             :      * We don't have to do anything else, because the awaker did all the
    1548             :      * necessary update of the lock table and MyProc.
    1549             :      */
    1550          18 :     return MyProc->waitStatus;
    1551             : }
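                      : 
                      : /*
                      :  * A minimal sketch, not part of proc.c, of the caller-side contract spelled
                      :  * out in ProcSleep's header comment: the lock table's partition lock is held
                      :  * at entry and still held at exit, and a STATUS_ERROR result means the
                      :  * deadlock checker kicked us off the wait queue.  The wrapper name is
                      :  * hypothetical; the real caller is WaitOnLock() in lock.c.
                      :  */
                      : #ifdef NOT_USED
                      : static void
                      : WaitOnLockSketch(LOCALLOCK *locallock, LockMethod lockMethodTable)
                      : {
                      :     /* Caller must already hold the lock table's partition lock here. */
                      :     if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
                      :     {
                      :         /* Deadlock: report it (ereports ERROR, so this does not return). */
                      :         DeadLockReport();
                      :     }
                      :     /* On STATUS_OK the awaker already updated the lock table and MyProc. */
                      : }
                      : #endif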
    1552             : 
    1553             : 
    1554             : /*
    1555             :  * ProcWakeup -- wake up a process by releasing its private semaphore.
    1556             :  *
    1557             :  *   Also remove the process from the wait queue and set its links invalid.
    1558             :  *   RETURN: the next process in the wait queue.
    1559             :  *
    1560             :  * The appropriate lock partition lock must be held by caller.
    1561             :  *
    1562             :  * XXX: presently, this code is only used for the "success" case, and only
     1563             :  * works correctly for that case.  To clean up in the failure case, we would
     1564             :  * need to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
    1565             :  * Hence, in practice the waitStatus parameter must be STATUS_OK.
    1566             :  */
    1567             : PGPROC *
    1568          18 : ProcWakeup(PGPROC *proc, int waitStatus)
    1569             : {
    1570             :     PGPROC     *retProc;
    1571             : 
    1572             :     /* Proc should be sleeping ... */
    1573          36 :     if (proc->links.prev == NULL ||
    1574          18 :         proc->links.next == NULL)
    1575           0 :         return NULL;
    1576          18 :     Assert(proc->waitStatus == STATUS_WAITING);
    1577             : 
    1578             :     /* Save next process before we zap the list link */
    1579          18 :     retProc = (PGPROC *) proc->links.next;
    1580             : 
    1581             :     /* Remove process from wait queue */
    1582          18 :     SHMQueueDelete(&(proc->links));
    1583          18 :     (proc->waitLock->waitProcs.size)--;
    1584             : 
    1585             :     /* Clean up process' state and pass it the ok/fail signal */
    1586          18 :     proc->waitLock = NULL;
    1587          18 :     proc->waitProcLock = NULL;
    1588          18 :     proc->waitStatus = waitStatus;
    1589             : 
    1590             :     /* And awaken it */
    1591          18 :     SetLatch(&proc->procLatch);
    1592             : 
    1593          18 :     return retProc;
    1594             : }
    1595             : 
    1596             : /*
    1597             :  * ProcLockWakeup -- routine for waking up processes when a lock is
    1598             :  *      released (or a prior waiter is aborted).  Scan all waiters
     1599             :  *      for the lock, and wake any that are no longer blocked.
    1600             :  *
    1601             :  * The appropriate lock partition lock must be held by caller.
    1602             :  */
    1603             : void
    1604          15 : ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
    1605             : {
    1606          15 :     PROC_QUEUE *waitQueue = &(lock->waitProcs);
    1607          15 :     int         queue_size = waitQueue->size;
    1608             :     PGPROC     *proc;
    1609          15 :     LOCKMASK    aheadRequests = 0;
    1610             : 
    1611          15 :     Assert(queue_size >= 0);
    1612             : 
    1613          15 :     if (queue_size == 0)
    1614          15 :         return;
    1615             : 
    1616          15 :     proc = (PGPROC *) waitQueue->links.next;
    1617             : 
    1618          71 :     while (queue_size-- > 0)
    1619             :     {
    1620          41 :         LOCKMODE    lockmode = proc->waitLockMode;
    1621             : 
    1622             :         /*
    1623             :          * Waken if (a) doesn't conflict with requests of earlier waiters, and
    1624             :          * (b) doesn't conflict with already-held locks.
    1625             :          */
    1626          71 :         if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
    1627          30 :             LockCheckConflicts(lockMethodTable,
    1628             :                                lockmode,
    1629             :                                lock,
    1630             :                                proc->waitProcLock) == STATUS_OK)
    1631             :         {
    1632             :             /* OK to waken */
    1633          18 :             GrantLock(lock, proc->waitProcLock, lockmode);
    1634          18 :             proc = ProcWakeup(proc, STATUS_OK);
    1635             : 
    1636             :             /*
    1637             :              * ProcWakeup removes proc from the lock's waiting process queue
    1638             :              * and returns the next proc in chain; don't use proc's next-link,
    1639             :              * because it's been cleared.
    1640             :              */
    1641             :         }
    1642             :         else
    1643             :         {
    1644             :             /*
    1645             :              * Cannot wake this guy. Remember his request for later checks.
    1646             :              */
    1647          23 :             aheadRequests |= LOCKBIT_ON(lockmode);
    1648          23 :             proc = (PGPROC *) proc->links.next;
    1649             :         }
    1650             :     }
    1651             : 
    1652          15 :     Assert(waitQueue->size >= 0);
    1653             : }
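                      : 
                      : /*
                      :  * A hypothetical worked example of the two-part wakeup test above.  Suppose
                      :  * the released lock's queue holds a ShareLock waiter followed by an
                      :  * AccessExclusiveLock waiter.  The ShareLock waiter can be woken as soon as
                      :  * no conflicting lock is held; the AccessExclusiveLock waiter then stays
                      :  * queued, either because it conflicts with the just-granted ShareLock (the
                      :  * LockCheckConflicts test) or, had the ShareLock waiter also stayed queued,
                      :  * because ShareLock would be recorded in aheadRequests.  The helper below
                      :  * just isolates the loop's condition; its name is illustrative.
                      :  */
                      : #ifdef NOT_USED
                      : static bool
                      : WaiterIsWakeableSketch(LockMethod lockMethodTable, PGPROC *proc,
                      :                        LOCK *lock, LOCKMASK aheadRequests)
                      : {
                      :     LOCKMODE    lockmode = proc->waitLockMode;
                      : 
                      :     return (lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
                      :         LockCheckConflicts(lockMethodTable, lockmode, lock,
                      :                            proc->waitProcLock) == STATUS_OK;
                      : }
                      : #endif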
    1654             : 
    1655             : /*
    1656             :  * CheckDeadLock
    1657             :  *
     1658             :  * We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
    1659             :  * lock to be released by some other process.  Check if there's a deadlock; if
    1660             :  * not, just return.  (But signal ProcSleep to log a message, if
    1661             :  * log_lock_waits is true.)  If we have a real deadlock, remove ourselves from
    1662             :  * the lock's wait queue and signal an error to ProcSleep.
    1663             :  */
    1664             : static void
    1665           0 : CheckDeadLock(void)
    1666             : {
    1667             :     int         i;
    1668             : 
    1669             :     /*
    1670             :      * Acquire exclusive lock on the entire shared lock data structures. Must
    1671             :      * grab LWLocks in partition-number order to avoid LWLock deadlock.
    1672             :      *
    1673             :      * Note that the deadlock check interrupt had better not be enabled
    1674             :      * anywhere that this process itself holds lock partition locks, else this
    1675             :      * will wait forever.  Also note that LWLockAcquire creates a critical
    1676             :      * section, so that this routine cannot be interrupted by cancel/die
    1677             :      * interrupts.
    1678             :      */
    1679           0 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    1680           0 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
    1681             : 
    1682             :     /*
    1683             :      * Check to see if we've been awoken by anyone in the interim.
    1684             :      *
    1685             :      * If we have, we can return and resume our transaction -- happy day.
    1686             :      * Before we are awoken the process releasing the lock grants it to us so
    1687             :      * we know that we don't have to wait anymore.
    1688             :      *
    1689             :      * We check by looking to see if we've been unlinked from the wait queue.
    1690             :      * This is quicker than checking our semaphore's state, since no kernel
    1691             :      * call is needed, and it is safe because we hold the lock partition lock.
    1692             :      */
    1693           0 :     if (MyProc->links.prev == NULL ||
    1694           0 :         MyProc->links.next == NULL)
    1695             :         goto check_done;
    1696             : 
    1697             : #ifdef LOCK_DEBUG
    1698             :     if (Debug_deadlocks)
    1699             :         DumpAllLocks();
    1700             : #endif
    1701             : 
    1702             :     /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
    1703           0 :     deadlock_state = DeadLockCheck(MyProc);
    1704             : 
    1705           0 :     if (deadlock_state == DS_HARD_DEADLOCK)
    1706             :     {
    1707             :         /*
    1708             :          * Oops.  We have a deadlock.
    1709             :          *
    1710             :          * Get this process out of wait state. (Note: we could do this more
    1711             :          * efficiently by relying on lockAwaited, but use this coding to
    1712             :          * preserve the flexibility to kill some other transaction than the
     1713             :          * preserve the flexibility to kill a transaction other than the
    1714             :          *
    1715             :          * RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so
    1716             :          * ProcSleep will report an error after we return from the signal
    1717             :          * handler.
    1718             :          */
    1719           0 :         Assert(MyProc->waitLock != NULL);
    1720           0 :         RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
    1721             : 
    1722             :         /*
    1723             :          * We're done here.  Transaction abort caused by the error that
    1724             :          * ProcSleep will raise will cause any other locks we hold to be
    1725             :          * released, thus allowing other processes to wake up; we don't need
    1726             :          * to do that here.  NOTE: an exception is that releasing locks we
    1727             :          * hold doesn't consider the possibility of waiters that were blocked
    1728             :          * behind us on the lock we just failed to get, and might now be
    1729             :          * wakable because we're not in front of them anymore.  However,
    1730             :          * RemoveFromWaitQueue took care of waking up any such processes.
    1731             :          */
    1732             :     }
    1733             : 
    1734             :     /*
    1735             :      * And release locks.  We do this in reverse order for two reasons: (1)
    1736             :      * Anyone else who needs more than one of the locks will be trying to lock
    1737             :      * them in increasing order; we don't want to release the other process
    1738             :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    1739             :      * behavior inside LWLockRelease.
    1740             :      */
    1741             : check_done:
    1742           0 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    1743           0 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    1744           0 : }
    1745             : 
    1746             : /*
    1747             :  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
    1748             :  *
    1749             :  * NB: Runs inside a signal handler, be careful.
    1750             :  */
    1751             : void
    1752           0 : CheckDeadLockAlert(void)
    1753             : {
    1754           0 :     int         save_errno = errno;
    1755             : 
    1756           0 :     got_deadlock_timeout = true;
    1757             : 
    1758             :     /*
     1759             :      * We have to set the latch again, even if handle_sig_alarm already did:
     1760             :      * when it ran, got_deadlock_timeout wasn't yet set.  It's unlikely that
     1761             :      * this would ever be a problem, but setting an already-set latch is cheap.
    1762             :      */
    1763           0 :     SetLatch(MyLatch);
    1764           0 :     errno = save_errno;
    1765           0 : }
    1766             : 
    1767             : /*
    1768             :  * ProcWaitForSignal - wait for a signal from another backend.
    1769             :  *
    1770             :  * As this uses the generic process latch the caller has to be robust against
    1771             :  * unrelated wakeups: Always check that the desired state has occurred, and
    1772             :  * wait again if not.
    1773             :  */
    1774             : void
    1775           0 : ProcWaitForSignal(uint32 wait_event_info)
    1776             : {
    1777           0 :     WaitLatch(MyLatch, WL_LATCH_SET, 0, wait_event_info);
    1778           0 :     ResetLatch(MyLatch);
    1779           0 :     CHECK_FOR_INTERRUPTS();
    1780           0 : }
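                      : 
                      : /*
                      :  * A minimal usage sketch for the caveat above: because the generic process
                      :  * latch is shared with unrelated wakeup sources, callers must loop until
                      :  * their own condition holds.  my_condition_satisfied() is a hypothetical
                      :  * stand-in; PG_WAIT_EXTENSION is a real wait-event class from pgstat.h.
                      :  */
                      : #ifdef NOT_USED
                      : static void
                      : WaitForConditionSketch(void)
                      : {
                      :     while (!my_condition_satisfied())
                      :         ProcWaitForSignal(PG_WAIT_EXTENSION);
                      : }
                      : #endif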
    1781             : 
    1782             : /*
    1783             :  * ProcSendSignal - send a signal to a backend identified by PID
    1784             :  */
    1785             : void
    1786           0 : ProcSendSignal(int pid)
    1787             : {
    1788           0 :     PGPROC     *proc = NULL;
    1789             : 
    1790           0 :     if (RecoveryInProgress())
    1791             :     {
    1792           0 :         SpinLockAcquire(ProcStructLock);
    1793             : 
    1794             :         /*
    1795             :          * Check to see whether it is the Startup process we wish to signal.
    1796             :          * This call is made by the buffer manager when it wishes to wake up a
     1797             :          * process that has been waiting for a buffer pin so it can obtain a
     1798             :          * cleanup lock using LockBufferForCleanup(). Startup is not a normal
     1799             :          * backend, so BackendPidGetProc() will not find it. So
    1800             :          * we remember the information for this special case.
    1801             :          */
    1802           0 :         if (pid == ProcGlobal->startupProcPid)
    1803           0 :             proc = ProcGlobal->startupProc;
    1804             : 
    1805           0 :         SpinLockRelease(ProcStructLock);
    1806             :     }
    1807             : 
    1808           0 :     if (proc == NULL)
    1809           0 :         proc = BackendPidGetProc(pid);
    1810             : 
    1811           0 :     if (proc != NULL)
    1812             :     {
    1813           0 :         SetLatch(&proc->procLatch);
    1814             :     }
    1815           0 : }
    1816             : 
    1817             : /*
    1818             :  * BecomeLockGroupLeader - designate process as lock group leader
    1819             :  *
    1820             :  * Once this function has returned, other processes can join the lock group
    1821             :  * by calling BecomeLockGroupMember.
    1822             :  */
    1823             : void
    1824          32 : BecomeLockGroupLeader(void)
    1825             : {
    1826             :     LWLock     *leader_lwlock;
    1827             : 
    1828             :     /* If we already did it, we don't need to do it again. */
    1829          32 :     if (MyProc->lockGroupLeader == MyProc)
    1830          63 :         return;
    1831             : 
    1832             :     /* We had better not be a follower. */
    1833           1 :     Assert(MyProc->lockGroupLeader == NULL);
    1834             : 
    1835             :     /* Create single-member group, containing only ourselves. */
    1836           1 :     leader_lwlock = LockHashPartitionLockByProc(MyProc);
    1837           1 :     LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
    1838           1 :     MyProc->lockGroupLeader = MyProc;
    1839           1 :     dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
    1840           1 :     LWLockRelease(leader_lwlock);
    1841             : }
    1842             : 
    1843             : /*
    1844             :  * BecomeLockGroupMember - designate process as lock group member
    1845             :  *
    1846             :  * This is pretty straightforward except for the possibility that the leader
    1847             :  * whose group we're trying to join might exit before we manage to do so;
    1848             :  * and the PGPROC might get recycled for an unrelated process.  To avoid
    1849             :  * that, we require the caller to pass the PID of the intended PGPROC as
    1850             :  * an interlock.  Returns true if we successfully join the intended lock
    1851             :  * group, and false if not.
    1852             :  */
    1853             : bool
    1854         115 : BecomeLockGroupMember(PGPROC *leader, int pid)
    1855             : {
    1856             :     LWLock     *leader_lwlock;
    1857         115 :     bool        ok = false;
    1858             : 
    1859             :     /* Group leader can't become member of group */
    1860         115 :     Assert(MyProc != leader);
    1861             : 
    1862             :     /* Can't already be a member of a group */
    1863         115 :     Assert(MyProc->lockGroupLeader == NULL);
    1864             : 
    1865             :     /* PID must be valid. */
    1866         115 :     Assert(pid != 0);
    1867             : 
    1868             :     /*
    1869             :      * Get lock protecting the group fields.  Note LockHashPartitionLockByProc
    1870             :      * accesses leader->pgprocno in a PGPROC that might be free.  This is safe
    1871             :      * because all PGPROCs' pgprocno fields are set during shared memory
    1872             :      * initialization and never change thereafter; so we will acquire the
     1873             :      * correct lock even if the leader PGPROC is in the process of being recycled.
    1874             :      */
    1875         115 :     leader_lwlock = LockHashPartitionLockByProc(leader);
    1876         115 :     LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
    1877             : 
    1878             :     /* Is this the leader we're looking for? */
    1879         115 :     if (leader->pid == pid && leader->lockGroupLeader == leader)
    1880             :     {
    1881             :         /* OK, join the group */
    1882         115 :         ok = true;
    1883         115 :         MyProc->lockGroupLeader = leader;
    1884         115 :         dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
    1885             :     }
    1886         115 :     LWLockRelease(leader_lwlock);
    1887             : 
    1888         115 :     return ok;
    1889             : }
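                      : 
                      : /*
                      :  * A condensed sketch of the intended handshake, modeled on how parallel
                      :  * query uses this pair of functions: the leader calls
                      :  * BecomeLockGroupLeader() before launching workers and advertises its
                      :  * PGPROC and PID through shared memory; each worker passes both back in
                      :  * and must treat a false result as "leader already exited".  The function
                      :  * name and error text here are hypothetical.
                      :  */
                      : #ifdef NOT_USED
                      : static void
                      : JoinLeaderLockGroupSketch(PGPROC *leader_proc, int leader_pid)
                      : {
                      :     /* leader_proc and leader_pid were advertised by the group leader. */
                      :     if (!BecomeLockGroupMember(leader_proc, leader_pid))
                      :         ereport(ERROR,
                      :                 (errmsg("lock group leader exited before worker could join")));
                      : }
                      : #endif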

Generated by: LCOV version 1.11