LCOV - code coverage report
Current view: top level - src/backend/utils/mmgr - portalmem.c (source / functions)
Test:     PostgreSQL
Date:     2017-09-29 13:40:31
Coverage: Lines:     276 hit / 301 total =  91.7 %
          Functions:  23 hit /  23 total = 100.0 %
Legend:   the "Line data" column shows per-line hit counts (blank = not instrumented, 0 = never hit)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * portalmem.c
       4             :  *    backend portal memory management
       5             :  *
       6             :  * Portals are objects representing the execution state of a query.
       7             :  * This module provides memory management services for portals, but it
       8             :  * doesn't actually run the executor for them.
       9             :  *
      10             :  *
      11             :  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
      12             :  * Portions Copyright (c) 1994, Regents of the University of California
      13             :  *
      14             :  * IDENTIFICATION
      15             :  *    src/backend/utils/mmgr/portalmem.c
      16             :  *
      17             :  *-------------------------------------------------------------------------
      18             :  */
      19             : #include "postgres.h"
      20             : 
      21             : #include "access/xact.h"
      22             : #include "catalog/pg_type.h"
      23             : #include "commands/portalcmds.h"
      24             : #include "miscadmin.h"
      25             : #include "utils/builtins.h"
      26             : #include "utils/memutils.h"
      27             : #include "utils/snapmgr.h"
      28             : #include "utils/timestamp.h"
      29             : 
      30             : /*
      31             :  * Estimate of the maximum number of open portals a user would have,
      32             :  * used in initially sizing the PortalHashTable in EnablePortalManager().
      33             :  * Since the hash table can expand, there's no need to make this overly
      34             :  * generous, and keeping it small avoids unnecessary overhead in the
      35             :  * hash_seq_search() calls executed during transaction end.
      36             :  */
      37             : #define PORTALS_PER_USER       16
      38             : 
      39             : 
      40             : /* ----------------
      41             :  *      Global state
      42             :  * ----------------
      43             :  */
      44             : 
      45             : #define MAX_PORTALNAME_LEN      NAMEDATALEN
      46             : 
      47             : typedef struct portalhashent
      48             : {
      49             :     char        portalname[MAX_PORTALNAME_LEN];
      50             :     Portal      portal;
      51             : } PortalHashEnt;
      52             : 
      53             : static HTAB *PortalHashTable = NULL;
      54             : 
      55             : #define PortalHashTableLookup(NAME, PORTAL) \
      56             : do { \
      57             :     PortalHashEnt *hentry; \
      58             :     \
      59             :     hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
      60             :                                            (NAME), HASH_FIND, NULL); \
      61             :     if (hentry) \
      62             :         PORTAL = hentry->portal; \
      63             :     else \
      64             :         PORTAL = NULL; \
      65             : } while(0)
      66             : 
      67             : #define PortalHashTableInsert(PORTAL, NAME) \
      68             : do { \
      69             :     PortalHashEnt *hentry; bool found; \
      70             :     \
      71             :     hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
      72             :                                            (NAME), HASH_ENTER, &found); \
      73             :     if (found) \
      74             :         elog(ERROR, "duplicate portal name"); \
      75             :     hentry->portal = PORTAL; \
      76             :     /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
      77             :     PORTAL->name = hentry->portalname; \
      78             : } while(0)
      79             : 
      80             : #define PortalHashTableDelete(PORTAL) \
      81             : do { \
      82             :     PortalHashEnt *hentry; \
      83             :     \
      84             :     hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
      85             :                                            PORTAL->name, HASH_REMOVE, NULL); \
      86             :     if (hentry == NULL) \
      87             :         elog(WARNING, "trying to delete portal name that does not exist"); \
      88             : } while(0)
      89             : 
      90             : static MemoryContext PortalMemory = NULL;
      91             : 
      92             : 
      93             : /* ----------------------------------------------------------------
      94             :  *                 public portal interface functions
      95             :  * ----------------------------------------------------------------
      96             :  */
      97             : 
      98             : /*
      99             :  * EnablePortalManager
     100             :  *      Enables the portal management module at backend startup.
     101             :  */
     102             : void
     103         338 : EnablePortalManager(void)
     104             : {
     105             :     HASHCTL     ctl;
     106             : 
     107         338 :     Assert(PortalMemory == NULL);
     108             : 
     109         338 :     PortalMemory = AllocSetContextCreate(TopMemoryContext,
     110             :                                          "PortalMemory",
     111             :                                          ALLOCSET_DEFAULT_SIZES);
     112             : 
     113         338 :     ctl.keysize = MAX_PORTALNAME_LEN;
     114         338 :     ctl.entrysize = sizeof(PortalHashEnt);
     115             : 
     116             :     /*
     117             :      * use PORTALS_PER_USER as a guess of how many hash table entries to
     118             :      * create, initially
     119             :      */
     120         338 :     PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
     121             :                                   &ctl, HASH_ELEM);
     122         338 : }
     123             : 
     124             : /*
     125             :  * GetPortalByName
     126             :  *      Returns a portal given a portal name, or NULL if name not found.
     127             :  */
     128             : Portal
     129       29331 : GetPortalByName(const char *name)
     130             : {
     131             :     Portal      portal;
     132             : 
     133       29331 :     if (PointerIsValid(name))
     134       29331 :         PortalHashTableLookup(name, portal);
     135             :     else
     136           0 :         portal = NULL;
     137             : 
     138       29331 :     return portal;
     139             : }
     140             : 
     141             : /*
     142             :  * PortalGetPrimaryStmt
     143             :  *      Get the "primary" stmt within a portal, ie, the one marked canSetTag.
     144             :  *
     145             :  * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
     146             :  * portal are marked canSetTag, returns the first one.  Neither of these
     147             :  * cases should occur in present usages of this function.
     148             :  */
     149             : PlannedStmt *
     150       13439 : PortalGetPrimaryStmt(Portal portal)
     151             : {
     152             :     ListCell   *lc;
     153             : 
     154       13439 :     foreach(lc, portal->stmts)
     155             :     {
     156       13439 :         PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
     157             : 
     158       13439 :         if (stmt->canSetTag)
     159       13439 :             return stmt;
     160             :     }
     161           0 :     return NULL;
     162             : }
     163             : 
     164             : /*
     165             :  * CreatePortal
     166             :  *      Returns a new portal given a name.
     167             :  *
     168             :  * allowDup: if true, automatically drop any pre-existing portal of the
     169             :  * same name (if false, an error is raised).
     170             :  *
     171             :  * dupSilent: if true, don't even emit a WARNING.
     172             :  */
     173             : Portal
     174       27306 : CreatePortal(const char *name, bool allowDup, bool dupSilent)
     175             : {
     176             :     Portal      portal;
     177             : 
     178       27306 :     AssertArg(PointerIsValid(name));
     179             : 
     180       27306 :     portal = GetPortalByName(name);
     181       27306 :     if (PortalIsValid(portal))
     182             :     {
     183          93 :         if (!allowDup)
     184           0 :             ereport(ERROR,
     185             :                     (errcode(ERRCODE_DUPLICATE_CURSOR),
     186             :                      errmsg("cursor \"%s\" already exists", name)));
     187          93 :         if (!dupSilent)
     188           0 :             ereport(WARNING,
     189             :                     (errcode(ERRCODE_DUPLICATE_CURSOR),
     190             :                      errmsg("closing existing cursor \"%s\"",
     191             :                             name)));
     192          93 :         PortalDrop(portal, false);
     193             :     }
     194             : 
     195             :     /* make new portal structure */
     196       27306 :     portal = (Portal) MemoryContextAllocZero(PortalMemory, sizeof *portal);
     197             : 
     198             :     /* initialize portal heap context; typically it won't store much */
     199       27306 :     portal->heap = AllocSetContextCreate(PortalMemory,
     200             :                                          "PortalHeapMemory",
     201             :                                          ALLOCSET_SMALL_SIZES);
     202             : 
     203             :     /* create a resource owner for the portal */
     204       27306 :     portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
     205             :                                            "Portal");
     206             : 
     207             :     /* initialize portal fields that don't start off zero */
     208       27306 :     portal->status = PORTAL_NEW;
     209       27306 :     portal->cleanup = PortalCleanup;
     210       27306 :     portal->createSubid = GetCurrentSubTransactionId();
     211       27306 :     portal->activeSubid = portal->createSubid;
     212       27306 :     portal->strategy = PORTAL_MULTI_QUERY;
     213       27306 :     portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
     214       27306 :     portal->atStart = true;
     215       27306 :     portal->atEnd = true;        /* disallow fetches until query is set */
     216       27306 :     portal->visible = true;
     217       27306 :     portal->creation_time = GetCurrentStatementStartTimestamp();
     218             : 
     219             :     /* put portal in table (sets portal->name) */
     220       27306 :     PortalHashTableInsert(portal, name);
     221             : 
     222       27306 :     return portal;
     223             : }
     224             : 
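For orientation, a caller normally pairs CreatePortal() with PortalDefineQuery(), PortalStart(), PortalRun() and PortalDrop(). The sketch below is illustrative only and not part of portalmem.c: the helper name run_planned_query is invented, the plan list is assumed to already live in a sufficiently long-lived context, and the PortalRun() signature shown is the PostgreSQL 10 one.

#include "postgres.h"

#include "nodes/parsenodes.h"
#include "tcop/dest.h"
#include "tcop/pquery.h"
#include "utils/portal.h"
#include "utils/snapmgr.h"

/* Illustrative sketch of the usual portal lifecycle (hypothetical helper). */
static void
run_planned_query(const char *query_string, List *plantree_list)
{
    /* unnamed portal; silently drop any leftover portal of the same name */
    Portal      portal = CreatePortal("", true, true);
    DestReceiver *receiver = CreateDestReceiver(DestNone);
    char        completionTag[COMPLETION_TAG_BUFSIZE];

    /* commandTag must be a constant string; stmts are not copied here */
    PortalDefineQuery(portal, NULL, query_string, "SELECT",
                      plantree_list, NULL);

    PortalStart(portal, NULL, 0, InvalidSnapshot);

    /* PostgreSQL 10 signature shown; older releases lack "run_once" */
    (void) PortalRun(portal, FETCH_ALL, true, true,
                     receiver, receiver, completionTag);

    receiver->rDestroy(receiver);
    PortalDrop(portal, false);
}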
     225             : /*
     226             :  * CreateNewPortal
     227             :  *      Create a new portal, assigning it a random nonconflicting name.
     228             :  */
     229             : Portal
     230         861 : CreateNewPortal(void)
     231             : {
     232             :     static unsigned int unnamed_portal_count = 0;
     233             : 
     234             :     char        portalname[MAX_PORTALNAME_LEN];
     235             : 
     236             :     /* Select a nonconflicting name */
     237             :     for (;;)
     238             :     {
     239         861 :         unnamed_portal_count++;
     240         861 :         sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
     241         861 :         if (GetPortalByName(portalname) == NULL)
     242         861 :             break;
     243           0 :     }
     244             : 
     245         861 :     return CreatePortal(portalname, false, false);
     246             : }
     247             : 
     248             : /*
     249             :  * PortalDefineQuery
     250             :  *      A simple subroutine to establish a portal's query.
     251             :  *
     252             :  * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
     253             :  * allowed anymore to pass NULL.  (If you really don't have source text,
     254             :  * you can pass a constant string, perhaps "(query not available)".)
     255             :  *
     256             :  * commandTag shall be NULL if and only if the original query string
     257             :  * (before rewriting) was an empty string.  Also, the passed commandTag must
     258             :  * be a pointer to a constant string, since it is not copied.
     259             :  *
     260             :  * If cplan is provided, then it is a cached plan containing the stmts, and
     261             :  * the caller must have done GetCachedPlan(), causing a refcount increment.
     262             :  * The refcount will be released when the portal is destroyed.
     263             :  *
     264             :  * If cplan is NULL, then it is the caller's responsibility to ensure that
     265             :  * the passed plan trees have adequate lifetime.  Typically this is done by
     266             :  * copying them into the portal's heap context.
     267             :  *
     268             :  * The caller is also responsible for ensuring that the passed prepStmtName
     269             :  * (if not NULL) and sourceText have adequate lifetime.
     270             :  *
     271             :  * NB: this function mustn't do much beyond storing the passed values; in
     272             :  * particular don't do anything that risks elog(ERROR).  If that were to
     273             :  * happen here before storing the cplan reference, we'd leak the plancache
     274             :  * refcount that the caller is trying to hand off to us.
     275             :  */
     276             : void
     277       27301 : PortalDefineQuery(Portal portal,
     278             :                   const char *prepStmtName,
     279             :                   const char *sourceText,
     280             :                   const char *commandTag,
     281             :                   List *stmts,
     282             :                   CachedPlan *cplan)
     283             : {
     284       27301 :     AssertArg(PortalIsValid(portal));
     285       27301 :     AssertState(portal->status == PORTAL_NEW);
     286             : 
     287       27301 :     AssertArg(sourceText != NULL);
     288       27301 :     AssertArg(commandTag != NULL || stmts == NIL);
     289             : 
     290       27301 :     portal->prepStmtName = prepStmtName;
     291       27301 :     portal->sourceText = sourceText;
     292       27301 :     portal->commandTag = commandTag;
     293       27301 :     portal->stmts = stmts;
     294       27301 :     portal->cplan = cplan;
     295       27301 :     portal->status = PORTAL_DEFINED;
     296       27301 : }
     297             : 
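The refcount hand-off described in the header comment can be pictured with this condensed sketch. The helper name is hypothetical; the caller is assumed to have obtained cplan from GetCachedPlan(), which incremented its refcount, and from this point the portal owns that reference until PortalDrop() releases it via PortalReleaseCachedPlan().

#include "postgres.h"

#include "utils/plancache.h"
#include "utils/portal.h"

/* Hypothetical helper: hand a cached-plan reference over to a portal. */
static void
define_portal_from_cached_plan(Portal portal, CachedPlanSource *psrc,
                               CachedPlan *cplan, const char *stmt_name)
{
    /*
     * Nothing that can elog(ERROR) may run between GetCachedPlan() and this
     * call, or the refcount would be leaked.
     */
    PortalDefineQuery(portal,
                      stmt_name,            /* may be NULL for an unnamed stmt */
                      psrc->query_string,   /* must outlive the portal */
                      psrc->commandTag,
                      cplan->stmt_list,
                      cplan);
}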
     298             : /*
     299             :  * PortalReleaseCachedPlan
     300             :  *      Release a portal's reference to its cached plan, if any.
     301             :  */
     302             : static void
     303       29386 : PortalReleaseCachedPlan(Portal portal)
     304             : {
     305       29386 :     if (portal->cplan)
     306             :     {
     307         335 :         ReleaseCachedPlan(portal->cplan, false);
     308         335 :         portal->cplan = NULL;
     309             : 
     310             :         /*
     311             :          * We must also clear portal->stmts which is now a dangling reference
     312             :          * to the cached plan's plan list.  This protects any code that might
     313             :          * try to examine the Portal later.
     314             :          */
     315         335 :         portal->stmts = NIL;
     316             :     }
     317       29386 : }
     318             : 
     319             : /*
     320             :  * PortalCreateHoldStore
     321             :  *      Create the tuplestore for a portal.
     322             :  */
     323             : void
     324        1668 : PortalCreateHoldStore(Portal portal)
     325             : {
     326             :     MemoryContext oldcxt;
     327             : 
     328        1668 :     Assert(portal->holdContext == NULL);
     329        1668 :     Assert(portal->holdStore == NULL);
     330        1668 :     Assert(portal->holdSnapshot == NULL);
     331             : 
     332             :     /*
     333             :      * Create the memory context that is used for storage of the tuple set.
     334             :      * Note this is NOT a child of the portal's heap memory.
     335             :      */
     336        1668 :     portal->holdContext =
     337        1668 :         AllocSetContextCreate(PortalMemory,
     338             :                               "PortalHoldContext",
     339             :                               ALLOCSET_DEFAULT_SIZES);
     340             : 
     341             :     /*
     342             :      * Create the tuple store, selecting cross-transaction temp files, and
     343             :      * enabling random access only if cursor requires scrolling.
     344             :      *
     345             :      * XXX: Should maintenance_work_mem be used for the portal size?
     346             :      */
     347        1668 :     oldcxt = MemoryContextSwitchTo(portal->holdContext);
     348             : 
     349        1668 :     portal->holdStore =
     350        1668 :         tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
     351             :                               true, work_mem);
     352             : 
     353        1668 :     MemoryContextSwitchTo(oldcxt);
     354        1668 : }
     355             : 
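Because the tuplestore is created while switched into portal->holdContext, the tuples it stores (or spills to temp files) survive the creating transaction. A minimal sketch of appending to the hold store follows, with an assumed helper name; PersistHoldablePortal() in portalcmds.c is the real consumer of this facility.

#include "postgres.h"

#include "executor/tuptable.h"
#include "utils/portal.h"
#include "utils/tuplestore.h"

/* Hypothetical helper: append one tuple to a portal's hold store. */
static void
hold_store_append(Portal portal, TupleTableSlot *slot)
{
    /* mirror PersistHoldablePortal(): do the work in the hold context */
    MemoryContext oldcxt = MemoryContextSwitchTo(portal->holdContext);

    tuplestore_puttupleslot(portal->holdStore, slot);

    MemoryContextSwitchTo(oldcxt);
}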
     356             : /*
     357             :  * PinPortal
     358             :  *      Protect a portal from dropping.
     359             :  *
     360             :  * A pinned portal is still unpinned and dropped at transaction or
     361             :  * subtransaction abort.
     362             :  */
     363             : void
     364         681 : PinPortal(Portal portal)
     365             : {
     366         681 :     if (portal->portalPinned)
     367           0 :         elog(ERROR, "portal already pinned");
     368             : 
     369         681 :     portal->portalPinned = true;
     370         681 : }
     371             : 
     372             : void
     373         678 : UnpinPortal(Portal portal)
     374             : {
     375         678 :     if (!portal->portalPinned)
     376           0 :         elog(ERROR, "portal not pinned");
     377             : 
     378         678 :     portal->portalPinned = false;
     379         678 : }
     380             : 
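PinPortal()/UnpinPortal() bracket code that iterates over a portal it does not own; PL/pgSQL's FOR-over-cursor loops are the main user. A minimal sketch with an assumed helper name:

#include "postgres.h"

#include "utils/portal.h"

/* Hypothetical helper: keep a borrowed portal from being dropped mid-loop. */
static void
loop_over_borrowed_portal(Portal portal)
{
    PinPortal(portal);

    /*
     * ... repeatedly fetch from the portal ...
     *
     * No PG_TRY is needed just for the pin: if an error escapes here,
     * AtCleanup_Portals() or AtSubCleanup_Portals() forcibly unpins the
     * portal before dropping it.
     */

    UnpinPortal(portal);
}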
     381             : /*
     382             :  * MarkPortalActive
     383             :  *      Transition a portal from READY to ACTIVE state.
     384             :  *
     385             :  * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
     386             :  */
     387             : void
     388       28167 : MarkPortalActive(Portal portal)
     389             : {
     390             :     /* For safety, this is a runtime test not just an Assert */
     391       28167 :     if (portal->status != PORTAL_READY)
     392           3 :         ereport(ERROR,
     393             :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
     394             :                  errmsg("portal \"%s\" cannot be run", portal->name)));
     395             :     /* Perform the state transition */
     396       28164 :     portal->status = PORTAL_ACTIVE;
     397       28164 :     portal->activeSubid = GetCurrentSubTransactionId();
     398       28164 : }
     399             : 
     400             : /*
     401             :  * MarkPortalDone
     402             :  *      Transition a portal from ACTIVE to DONE state.
     403             :  *
     404             :  * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
     405             :  */
     406             : void
     407       13100 : MarkPortalDone(Portal portal)
     408             : {
     409             :     /* Perform the state transition */
     410       13100 :     Assert(portal->status == PORTAL_ACTIVE);
     411       13100 :     portal->status = PORTAL_DONE;
     412             : 
     413             :     /*
     414             :      * Allow portalcmds.c to clean up the state it knows about.  We might as
     415             :      * well do that now, since the portal can't be executed any more.
     416             :      *
     417             :      * In some cases involving execution of a ROLLBACK command in an already
     418             :      * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
     419             :      * with the cleanup hook still unexecuted.
     420             :      */
     421       13100 :     if (PointerIsValid(portal->cleanup))
     422             :     {
     423       13100 :         (*portal->cleanup) (portal);
     424       13100 :         portal->cleanup = NULL;
     425             :     }
     426       13100 : }
     427             : 
     428             : /*
     429             :  * MarkPortalFailed
     430             :  *      Transition a portal into FAILED state.
     431             :  *
     432             :  * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
     433             :  */
     434             : void
     435        2072 : MarkPortalFailed(Portal portal)
     436             : {
     437             :     /* Perform the state transition */
     438        2072 :     Assert(portal->status != PORTAL_DONE);
     439        2072 :     portal->status = PORTAL_FAILED;
     440             : 
     441             :     /*
     442             :      * Allow portalcmds.c to clean up the state it knows about.  We might as
     443             :      * well do that now, since the portal can't be executed any more.
     444             :      *
     445             :      * In some cases involving cleanup of an already aborted transaction, this
     446             :      * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
     447             :      * still unexecuted.
     448             :      */
     449        2072 :     if (PointerIsValid(portal->cleanup))
     450             :     {
     451        2072 :         (*portal->cleanup) (portal);
     452        2072 :         portal->cleanup = NULL;
     453             :     }
     454        2072 : }
     455             : 
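Together with MarkPortalActive(), these transitions implement the READY -> ACTIVE -> DONE/FAILED discipline expected of execution code. The following condensed sketch is a hypothetical wrapper; the real PortalRun() additionally returns some portal strategies to READY instead of DONE and saves/restores global portal state.

#include "postgres.h"

#include "utils/portal.h"

/* Hypothetical wrapper showing the expected portal state transitions. */
static void
execute_portal_once(Portal portal)
{
    MarkPortalActive(portal);   /* errors out unless status is PORTAL_READY */

    PG_TRY();
    {
        /* ... run the executor for this portal ... */
    }
    PG_CATCH();
    {
        /* never leave a portal behind in ACTIVE state */
        MarkPortalFailed(portal);
        PG_RE_THROW();
    }
    PG_END_TRY();

    MarkPortalDone(portal);     /* also runs the portal's cleanup hook */
}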
     456             : /*
     457             :  * PortalDrop
     458             :  *      Destroy the portal.
     459             :  */
     460             : void
     461       27304 : PortalDrop(Portal portal, bool isTopCommit)
     462             : {
     463       27304 :     AssertArg(PortalIsValid(portal));
     464             : 
     465             :     /*
     466             :      * Don't allow dropping a pinned portal, it's still needed by whoever
     467             :      * pinned it. Not sure if the PORTAL_ACTIVE case can validly happen or
     468             :      * not...
     469             :      */
     470       54608 :     if (portal->portalPinned ||
     471       27304 :         portal->status == PORTAL_ACTIVE)
     472           0 :         ereport(ERROR,
     473             :                 (errcode(ERRCODE_INVALID_CURSOR_STATE),
     474             :                  errmsg("cannot drop active portal \"%s\"", portal->name)));
     475             : 
     476             :     /*
     477             :      * Allow portalcmds.c to clean up the state it knows about, in particular
     478             :      * shutting down the executor if still active.  This step potentially runs
     479             :      * user-defined code so failure has to be expected.  It's the cleanup
     480             :      * hook's responsibility to not try to do that more than once, in the case
     481             :      * that failure occurs and then we come back to drop the portal again
     482             :      * during transaction abort.
     483             :      *
     484             :      * Note: in most paths of control, this will have been done already in
     485             :      * MarkPortalDone or MarkPortalFailed.  We're just making sure.
     486             :      */
     487       27304 :     if (PointerIsValid(portal->cleanup))
     488             :     {
     489       12127 :         (*portal->cleanup) (portal);
     490       12127 :         portal->cleanup = NULL;
     491             :     }
     492             : 
     493             :     /*
     494             :      * Remove portal from hash table.  Because we do this here, we will not
     495             :      * come back to try to remove the portal again if there's any error in the
     496             :      * subsequent steps.  Better to leak a little memory than to get into an
     497             :      * infinite error-recovery loop.
     498             :      */
     499       27304 :     PortalHashTableDelete(portal);
     500             : 
     501             :     /* drop cached plan reference, if any */
     502       27304 :     PortalReleaseCachedPlan(portal);
     503             : 
     504             :     /*
     505             :      * If portal has a snapshot protecting its data, release that.  This needs
     506             :      * a little care since the registration will be attached to the portal's
     507             :      * resowner; if the portal failed, we will already have released the
     508             :      * resowner (and the snapshot) during transaction abort.
     509             :      */
     510       27304 :     if (portal->holdSnapshot)
     511             :     {
     512        1406 :         if (portal->resowner)
     513        1379 :             UnregisterSnapshotFromOwner(portal->holdSnapshot,
     514             :                                         portal->resowner);
     515        1406 :         portal->holdSnapshot = NULL;
     516             :     }
     517             : 
     518             :     /*
     519             :      * Release any resources still attached to the portal.  There are several
     520             :      * cases being covered here:
     521             :      *
     522             :      * Top transaction commit (indicated by isTopCommit): normally we should
     523             :      * do nothing here and let the regular end-of-transaction resource
     524             :      * releasing mechanism handle these resources too.  However, if we have a
     525             :      * FAILED portal (eg, a cursor that got an error), we'd better clean up
     526             :      * its resources to avoid resource-leakage warning messages.
     527             :      *
     528             :      * Sub transaction commit: never comes here at all, since we don't kill
     529             :      * any portals in AtSubCommit_Portals().
     530             :      *
     531             :      * Main or sub transaction abort: we will do nothing here because
     532             :      * portal->resowner was already set NULL; the resources were already
     533             :      * cleaned up in transaction abort.
     534             :      *
     535             :      * Ordinary portal drop: must release resources.  However, if the portal
     536             :      * is not FAILED then we do not release its locks.  The locks become the
     537             :      * responsibility of the transaction's ResourceOwner (since it is the
     538             :      * parent of the portal's owner) and will be released when the transaction
     539             :      * eventually ends.
     540             :      */
     541       27304 :     if (portal->resowner &&
     542          23 :         (!isTopCommit || portal->status == PORTAL_FAILED))
     543             :     {
     544       25141 :         bool        isCommit = (portal->status != PORTAL_FAILED);
     545             : 
     546       25141 :         ResourceOwnerRelease(portal->resowner,
     547             :                              RESOURCE_RELEASE_BEFORE_LOCKS,
     548             :                              isCommit, false);
     549       25141 :         ResourceOwnerRelease(portal->resowner,
     550             :                              RESOURCE_RELEASE_LOCKS,
     551             :                              isCommit, false);
     552       25141 :         ResourceOwnerRelease(portal->resowner,
     553             :                              RESOURCE_RELEASE_AFTER_LOCKS,
     554             :                              isCommit, false);
     555       25141 :         ResourceOwnerDelete(portal->resowner);
     556             :     }
     557       27304 :     portal->resowner = NULL;
     558             : 
     559             :     /*
     560             :      * Delete tuplestore if present.  We should do this even under error
     561             :      * conditions; since the tuplestore would have been using cross-
     562             :      * transaction storage, its temp files need to be explicitly deleted.
     563             :      */
     564       27304 :     if (portal->holdStore)
     565             :     {
     566             :         MemoryContext oldcontext;
     567             : 
     568        1666 :         oldcontext = MemoryContextSwitchTo(portal->holdContext);
     569        1666 :         tuplestore_end(portal->holdStore);
     570        1666 :         MemoryContextSwitchTo(oldcontext);
     571        1666 :         portal->holdStore = NULL;
     572             :     }
     573             : 
     574             :     /* delete tuplestore storage, if any */
     575       27304 :     if (portal->holdContext)
     576        1666 :         MemoryContextDelete(portal->holdContext);
     577             : 
     578             :     /* release subsidiary storage */
     579       27304 :     MemoryContextDelete(PortalGetHeapMemory(portal));
     580             : 
     581             :     /* release portal struct (it's in PortalMemory) */
     582       27304 :     pfree(portal);
     583       27304 : }
     584             : 
     585             : /*
     586             :  * Delete all declared cursors.
     587             :  *
     588             :  * Used by commands: CLOSE ALL, DISCARD ALL
     589             :  */
     590             : void
     591           3 : PortalHashTableDeleteAll(void)
     592             : {
     593             :     HASH_SEQ_STATUS status;
     594             :     PortalHashEnt *hentry;
     595             : 
     596           3 :     if (PortalHashTable == NULL)
     597           3 :         return;
     598             : 
     599           3 :     hash_seq_init(&status, PortalHashTable);
     600          15 :     while ((hentry = hash_seq_search(&status)) != NULL)
     601             :     {
     602           9 :         Portal      portal = hentry->portal;
     603             : 
     604             :         /* Can't close the active portal (the one running the command) */
     605           9 :         if (portal->status == PORTAL_ACTIVE)
     606           5 :             continue;
     607             : 
     608           4 :         PortalDrop(portal, false);
     609             : 
     610             :         /* Restart the iteration in case that led to other drops */
     611           4 :         hash_seq_term(&status);
     612           4 :         hash_seq_init(&status, PortalHashTable);
     613             :     }
     614             : }
     615             : 
     616             : 
     617             : /*
     618             :  * Pre-commit processing for portals.
     619             :  *
     620             :  * Holdable cursors created in this transaction need to be converted to
     621             :  * materialized form, since we are going to close down the executor and
     622             :  * release locks.  Non-holdable portals created in this transaction are
     623             :  * simply removed.  Portals remaining from prior transactions should be
     624             :  * left untouched.
     625             :  *
     626             :  * Returns TRUE if any portals changed state (possibly causing user-defined
     627             :  * code to be run), FALSE if not.
     628             :  */
     629             : bool
     630       22908 : PreCommit_Portals(bool isPrepare)
     631             : {
     632       22908 :     bool        result = false;
     633             :     HASH_SEQ_STATUS status;
     634             :     PortalHashEnt *hentry;
     635             : 
     636       22908 :     hash_seq_init(&status, PortalHashTable);
     637             : 
     638       46454 :     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
     639             :     {
     640         638 :         Portal      portal = hentry->portal;
     641             : 
     642             :         /*
     643             :          * There should be no pinned portals anymore. Complain if someone
     644             :          * leaked one.
     645             :          */
     646         638 :         if (portal->portalPinned)
     647           0 :             elog(ERROR, "cannot commit while a portal is pinned");
     648             : 
     649             :         /*
     650             :          * Do not touch active portals --- this can only happen in the case of
     651             :          * a multi-transaction utility command, such as VACUUM.
     652             :          *
     653             :          * Note however that any resource owner attached to such a portal is
     654             :          * still going to go away, so don't leave a dangling pointer.
     655             :          */
     656         638 :         if (portal->status == PORTAL_ACTIVE)
     657             :         {
     658         565 :             portal->resowner = NULL;
     659         565 :             continue;
     660             :         }
     661             : 
     662             :         /* Is it a holdable portal created in the current xact? */
     663         123 :         if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
     664          55 :             portal->createSubid != InvalidSubTransactionId &&
     665           5 :             portal->status == PORTAL_READY)
     666             :         {
     667             :             /*
     668             :              * We are exiting the transaction that created a holdable cursor.
     669             :              * Instead of dropping the portal, prepare it for access by later
     670             :              * transactions.
     671             :              *
     672             :              * However, if this is PREPARE TRANSACTION rather than COMMIT,
     673             :              * refuse PREPARE, because the semantics seem pretty unclear.
     674             :              */
     675           5 :             if (isPrepare)
     676           0 :                 ereport(ERROR,
     677             :                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
     678             :                          errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));
     679             : 
     680             :             /*
     681             :              * Note that PersistHoldablePortal() must release all resources
     682             :              * used by the portal that are local to the creating transaction.
     683             :              */
     684           5 :             PortalCreateHoldStore(portal);
     685           5 :             PersistHoldablePortal(portal);
     686             : 
     687             :             /* drop cached plan reference, if any */
     688           5 :             PortalReleaseCachedPlan(portal);
     689             : 
     690             :             /*
     691             :              * Any resources belonging to the portal will be released in the
     692             :              * upcoming transaction-wide cleanup; the portal will no longer
     693             :              * have its own resources.
     694             :              */
     695           5 :             portal->resowner = NULL;
     696             : 
     697             :             /*
     698             :              * Having successfully exported the holdable cursor, mark it as
     699             :              * not belonging to this transaction.
     700             :              */
     701           5 :             portal->createSubid = InvalidSubTransactionId;
     702           5 :             portal->activeSubid = InvalidSubTransactionId;
     703             : 
     704             :             /* Report we changed state */
     705           5 :             result = true;
     706             :         }
     707          68 :         else if (portal->createSubid == InvalidSubTransactionId)
     708             :         {
     709             :             /*
     710             :              * Do nothing to cursors held over from a previous transaction
     711             :              * (including ones we just froze in a previous cycle of this loop)
     712             :              */
     713          45 :             continue;
     714             :         }
     715             :         else
     716             :         {
     717             :             /* Zap all non-holdable portals */
     718          23 :             PortalDrop(portal, true);
     719             : 
     720             :             /* Report we changed state */
     721          23 :             result = true;
     722             :         }
     723             : 
     724             :         /*
     725             :          * After either freezing or dropping a portal, we have to restart the
     726             :          * iteration, because we could have invoked user-defined code that
     727             :          * caused a drop of the next portal in the hash chain.
     728             :          */
     729          28 :         hash_seq_term(&status);
     730          28 :         hash_seq_init(&status, PortalHashTable);
     731             :     }
     732             : 
     733       22908 :     return result;
     734             : }
     735             : 
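The boolean result exists because dropping or persisting a portal can run user-defined code that queues more work. The commit path therefore loops until nothing changes, roughly as in this condensed sketch of the pattern used in xact.c (details vary by version; the wrapper name is assumed):

#include "postgres.h"

#include "commands/trigger.h"
#include "utils/portal.h"

/* Condensed sketch of the pre-commit loop that consumes the return value. */
static void
precommit_loop(void)
{
    for (;;)
    {
        /* deferred triggers queued so far may open or close cursors ... */
        AfterTriggerFireDeferred();

        /* ... so keep going until PreCommit_Portals() reports no changes */
        if (!PreCommit_Portals(false))
            break;
    }
}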
     736             : /*
     737             :  * Abort processing for portals.
     738             :  *
     739             :  * At this point we reset "active" status and run the cleanup hook if
     740             :  * present, but we can't release the portal's memory until the cleanup call.
     741             :  *
     742             :  * The reason we need to reset active is so that we can replace the unnamed
     743             :  * portal, else we'll fail to execute ROLLBACK when it arrives.
     744             :  */
     745             : void
     746        3279 : AtAbort_Portals(void)
     747             : {
     748             :     HASH_SEQ_STATUS status;
     749             :     PortalHashEnt *hentry;
     750             : 
     751        3279 :     hash_seq_init(&status, PortalHashTable);
     752             : 
     753        8634 :     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
     754             :     {
     755        2076 :         Portal      portal = hentry->portal;
     756             : 
     757             :         /*
     758             :          * See similar code in AtSubAbort_Portals().  This would fire if code
     759             :          * orchestrating multiple top-level transactions within a portal, such
     760             :          * as VACUUM, caught errors and continued under the same portal with a
     761             :          * fresh transaction.  No part of core PostgreSQL functions that way.
     762             :          * XXX Such code would wish the portal to remain ACTIVE, as in
     763             :          * PreCommit_Portals().
     764             :          */
     765        2076 :         if (portal->status == PORTAL_ACTIVE)
     766           0 :             MarkPortalFailed(portal);
     767             : 
     768             :         /*
     769             :          * Do nothing else to cursors held over from a previous transaction.
     770             :          */
     771        2076 :         if (portal->createSubid == InvalidSubTransactionId)
     772          23 :             continue;
     773             : 
     774             :         /*
     775             :          * If it was created in the current transaction, we can't do normal
     776             :          * shutdown on a READY portal either; it might refer to objects
     777             :          * created in the failed transaction.  See comments in
     778             :          * AtSubAbort_Portals.
     779             :          */
     780        2053 :         if (portal->status == PORTAL_READY)
     781          31 :             MarkPortalFailed(portal);
     782             : 
     783             :         /*
     784             :          * Allow portalcmds.c to clean up the state it knows about, if we
     785             :          * haven't already.
     786             :          */
     787        2053 :         if (PointerIsValid(portal->cleanup))
     788             :         {
     789           5 :             (*portal->cleanup) (portal);
     790           5 :             portal->cleanup = NULL;
     791             :         }
     792             : 
     793             :         /* drop cached plan reference, if any */
     794        2053 :         PortalReleaseCachedPlan(portal);
     795             : 
     796             :         /*
     797             :          * Any resources belonging to the portal will be released in the
     798             :          * upcoming transaction-wide cleanup; they will be gone before we run
     799             :          * PortalDrop.
     800             :          */
     801        2053 :         portal->resowner = NULL;
     802             : 
     803             :         /*
     804             :          * Although we can't delete the portal data structure proper, we can
     805             :          * release any memory in subsidiary contexts, such as executor state.
     806             :          * The cleanup hook was the last thing that might have needed data
     807             :          * there.
     808             :          */
     809        2053 :         MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
     810             :     }
     811        3279 : }
     812             : 
     813             : /*
     814             :  * Post-abort cleanup for portals.
     815             :  *
     816             :  * Delete all portals not held over from prior transactions.  */
     817             : void
     818        3278 : AtCleanup_Portals(void)
     819             : {
     820             :     HASH_SEQ_STATUS status;
     821             :     PortalHashEnt *hentry;
     822             : 
     823        3278 :     hash_seq_init(&status, PortalHashTable);
     824             : 
     825        8562 :     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
     826             :     {
     827        2006 :         Portal      portal = hentry->portal;
     828             : 
     829             :         /* Do nothing to cursors held over from a previous transaction */
     830        2006 :         if (portal->createSubid == InvalidSubTransactionId)
     831             :         {
     832          23 :             Assert(portal->status != PORTAL_ACTIVE);
     833          23 :             Assert(portal->resowner == NULL);
     834          23 :             continue;
     835             :         }
     836             : 
     837             :         /*
     838             :          * If a portal is still pinned, forcibly unpin it. PortalDrop will not
     839             :          * let us drop the portal otherwise. Whoever pinned the portal was
     840             :          * interrupted by the abort too and won't try to use it anymore.
     841             :          */
     842        1983 :         if (portal->portalPinned)
     843           3 :             portal->portalPinned = false;
     844             : 
     845             :         /*
     846             :          * We had better not call any user-defined code during cleanup, so if
     847             :          * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
     848             :          */
     849        1983 :         if (PointerIsValid(portal->cleanup))
     850             :         {
     851           0 :             elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
     852           0 :             portal->cleanup = NULL;
     853             :         }
     854             : 
     855             :         /* Zap it. */
     856        1983 :         PortalDrop(portal, false);
     857             :     }
     858        3278 : }
     859             : 
     860             : /*
     861             :  * Pre-subcommit processing for portals.
     862             :  *
     863             :  * Reassign portals created or used in the current subtransaction to the
     864             :  * parent subtransaction.
     865             :  */
     866             : void
     867          49 : AtSubCommit_Portals(SubTransactionId mySubid,
     868             :                     SubTransactionId parentSubid,
     869             :                     ResourceOwner parentXactOwner)
     870             : {
     871             :     HASH_SEQ_STATUS status;
     872             :     PortalHashEnt *hentry;
     873             : 
     874          49 :     hash_seq_init(&status, PortalHashTable);
     875             : 
     876         109 :     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
     877             :     {
     878          11 :         Portal      portal = hentry->portal;
     879             : 
     880          11 :         if (portal->createSubid == mySubid)
     881             :         {
     882           0 :             portal->createSubid = parentSubid;
     883           0 :             if (portal->resowner)
     884           0 :                 ResourceOwnerNewParent(portal->resowner, parentXactOwner);
     885             :         }
     886          11 :         if (portal->activeSubid == mySubid)
     887           1 :             portal->activeSubid = parentSubid;
     888             :     }
     889          49 : }
     890             : 
     891             : /*
     892             :  * Subtransaction abort handling for portals.
     893             :  *
     894             :  * Deactivate portals created or used during the failed subtransaction.
     895             :  * Note that per AtSubCommit_Portals, this will catch portals created/used
     896             :  * in descendants of the subtransaction too.
     897             :  *
     898             :  * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
     899             :  */
     900             : void
     901         323 : AtSubAbort_Portals(SubTransactionId mySubid,
     902             :                    SubTransactionId parentSubid,
     903             :                    ResourceOwner myXactOwner,
     904             :                    ResourceOwner parentXactOwner)
     905             : {
     906             :     HASH_SEQ_STATUS status;
     907             :     PortalHashEnt *hentry;
     908             : 
     909         323 :     hash_seq_init(&status, PortalHashTable);
     910             : 
     911        1504 :     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
     912             :     {
     913         858 :         Portal      portal = hentry->portal;
     914             : 
     915             :         /* Was it created in this subtransaction? */
     916         858 :         if (portal->createSubid != mySubid)
     917             :         {
     918             :             /* No, but maybe it was used in this subtransaction? */
     919         834 :             if (portal->activeSubid == mySubid)
     920             :             {
     921             :                 /* Maintain activeSubid until the portal is removed */
     922           7 :                 portal->activeSubid = parentSubid;
     923             : 
     924             :                 /*
     925             :                  * A MarkPortalActive() caller ran an upper-level portal in
     926             :                  * this subtransaction and left the portal ACTIVE.  This can't
     927             :                  * happen, but force the portal into FAILED state for the same
     928             :                  * reasons discussed below.
     929             :                  *
     930             :                  * We assume we can get away without forcing upper-level READY
     931             :                  * portals to fail, even if they were run and then suspended.
     932             :                  * In theory a suspended upper-level portal could have
     933             :                  * acquired some references to objects that are about to be
     934             :                  * destroyed, but there should be sufficient defenses against
     935             :                  * such cases: the portal's original query cannot contain such
     936             :                  * references, and any references within, say, cached plans of
     937             :                  * PL/pgSQL functions are not from active queries and should
     938             :                  * be protected by revalidation logic.
     939             :                  */
     940           7 :                 if (portal->status == PORTAL_ACTIVE)
     941           0 :                     MarkPortalFailed(portal);
     942             : 
     943             :                 /*
     944             :                  * Also, if we failed it during the current subtransaction
     945             :                  * (either just above, or earlier), reattach its resource
     946             :                  * owner to the current subtransaction's resource owner, so
     947             :                  * that any resources it still holds will be released while
     948             :                  * cleaning up this subtransaction.  This prevents some corner
     949             :                  * cases wherein we might get Asserts or worse while cleaning
     950             :                  * up objects created during the current subtransaction
     951             :                  * (because they're still referenced within this portal).
     952             :                  */
     953           7 :                 if (portal->status == PORTAL_FAILED && portal->resowner)
     954             :                 {
     955           2 :                     ResourceOwnerNewParent(portal->resowner, myXactOwner);
     956           2 :                     portal->resowner = NULL;
     957             :                 }
     958             :             }
     959             :             /* Done if it wasn't created in this subtransaction */
     960         834 :             continue;
     961             :         }
     962             : 
     963             :         /*
     964             :          * Force any live portals of my own subtransaction into FAILED state.
     965             :          * We have to do this because they might refer to objects created or
     966             :          * changed in the failed subtransaction, leading to crashes within
     967             :          * ExecutorEnd when portalcmds.c tries to close down the portal.
     968             :          * Currently, every MarkPortalActive() caller ensures it updates the
     969             :          * portal status again before relinquishing control, so ACTIVE can't
     970             :          * happen here.  If it does happen, dispose the portal like existing
     971             :          * MarkPortalActive() callers would.
     972             :          */
     973          47 :         if (portal->status == PORTAL_READY ||
     974          23 :             portal->status == PORTAL_ACTIVE)
     975           1 :             MarkPortalFailed(portal);
     976             : 
     977             :         /*
     978             :          * Allow portalcmds.c to clean up the state it knows about, if we
     979             :          * haven't already.
     980             :          */
     981          24 :         if (PointerIsValid(portal->cleanup))
     982             :         {
     983           0 :             (*portal->cleanup) (portal);
     984           0 :             portal->cleanup = NULL;
     985             :         }
     986             : 
     987             :         /* drop cached plan reference, if any */
     988          24 :         PortalReleaseCachedPlan(portal);
     989             : 
     990             :         /*
     991             :          * Any resources belonging to the portal will be released in the
     992             :          * upcoming transaction-wide cleanup; they will be gone before we run
     993             :          * PortalDrop.
     994             :          */
     995          24 :         portal->resowner = NULL;
     996             : 
     997             :         /*
     998             :          * Although we can't delete the portal data structure proper, we can
     999             :          * release any memory in subsidiary contexts, such as executor state.
    1000             :          * The cleanup hook was the last thing that might have needed data
    1001             :          * there.
    1002             :          */
    1003          24 :         MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
    1004             :     }
    1005         323 : }
    1006             : 
    1007             : /*
    1008             :  * Post-subabort cleanup for portals.
    1009             :  *
    1010             :  * Drop all portals created in the failed subtransaction (but note that
    1011             :  * we will not drop any that were reassigned to the parent above).
    1012             :  */
    1013             : void
    1014         323 : AtSubCleanup_Portals(SubTransactionId mySubid)
    1015             : {
    1016             :     HASH_SEQ_STATUS status;
    1017             :     PortalHashEnt *hentry;
    1018             : 
    1019         323 :     hash_seq_init(&status, PortalHashTable);
    1020             : 
    1021        1481 :     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    1022             :     {
    1023         835 :         Portal      portal = hentry->portal;
    1024             : 
    1025         835 :         if (portal->createSubid != mySubid)
    1026         834 :             continue;
    1027             : 
    1028             :         /*
    1029             :          * If a portal is still pinned, forcibly unpin it. PortalDrop will not
    1030             :          * let us drop the portal otherwise. Whoever pinned the portal was
    1031             :          * interrupted by the abort too and won't try to use it anymore.
    1032             :          */
    1033           1 :         if (portal->portalPinned)
    1034           0 :             portal->portalPinned = false;
    1035             : 
    1036             :         /*
    1037             :          * We had better not call any user-defined code during cleanup, so if
    1038             :          * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
    1039             :          */
    1040           1 :         if (PointerIsValid(portal->cleanup))
    1041             :         {
    1042           0 :             elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
    1043           0 :             portal->cleanup = NULL;
    1044             :         }
    1045             : 
    1046             :         /* Zap it. */
    1047           1 :         PortalDrop(portal, false);
    1048             :     }
    1049         323 : }
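
For context on the forcible unpin above: portals get pinned by callers
(PL/pgSQL cursor FOR loops, for instance) that iterate over a cursor and
must not have it dropped underneath them.  A hedged sketch of such a caller,
using the pinning helpers declared in utils/portal.h as I understand them;
the wrapper name is invented for illustration:

    #include "postgres.h"
    #include "utils/portal.h"

    /* Illustrative only: iterate over a named cursor while it is pinned. */
    static void
    iterate_over_cursor(const char *curname)
    {
        Portal      portal = GetPortalByName(curname);

        if (!PortalIsValid(portal))
            elog(ERROR, "cursor \"%s\" does not exist", curname);

        /* Pin so nested code cannot drop the portal out from under us. */
        PinPortal(portal);

        /* ... fetch rows from the portal here ... */

        /*
         * If an error escapes before this point, the pin is cleared during
         * abort cleanup (see AtSubCleanup_Portals above) rather than by us.
         */
        UnpinPortal(portal);
    }
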
    1050             : 
    1051             : /* Find all available cursors */
    1052             : Datum
    1053          14 : pg_cursor(PG_FUNCTION_ARGS)
    1054             : {
    1055          14 :     ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    1056             :     TupleDesc   tupdesc;
    1057             :     Tuplestorestate *tupstore;
    1058             :     MemoryContext per_query_ctx;
    1059             :     MemoryContext oldcontext;
    1060             :     HASH_SEQ_STATUS hash_seq;
    1061             :     PortalHashEnt *hentry;
    1062             : 
    1063             :     /* check to see if caller supports us returning a tuplestore */
    1064          14 :     if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
    1065           0 :         ereport(ERROR,
    1066             :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    1067             :                  errmsg("set-valued function called in context that cannot accept a set")));
    1068          14 :     if (!(rsinfo->allowedModes & SFRM_Materialize))
    1069           0 :         ereport(ERROR,
    1070             :                 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    1071             :                  errmsg("materialize mode required, but it is not " \
    1072             :                         "allowed in this context")));
    1073             : 
    1074             :     /* need to build tuplestore in query context */
    1075          14 :     per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    1076          14 :     oldcontext = MemoryContextSwitchTo(per_query_ctx);
    1077             : 
    1078             :     /*
    1079             :      * build tupdesc for result tuples. This must match the definition of the
    1080             :      * pg_cursors view in system_views.sql
    1081             :      */
    1082          14 :     tupdesc = CreateTemplateTupleDesc(6, false);
    1083          14 :     TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
    1084             :                        TEXTOID, -1, 0);
    1085          14 :     TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
    1086             :                        TEXTOID, -1, 0);
    1087          14 :     TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
    1088             :                        BOOLOID, -1, 0);
    1089          14 :     TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
    1090             :                        BOOLOID, -1, 0);
    1091          14 :     TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
    1092             :                        BOOLOID, -1, 0);
    1093          14 :     TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
    1094             :                        TIMESTAMPTZOID, -1, 0);
    1095             : 
    1096             :     /*
    1097             :      * We put all the tuples into a tuplestore in one scan of the hashtable.
    1098             :      * This avoids any issue of the hashtable possibly changing between calls.
    1099             :      */
    1100          14 :     tupstore =
    1101          14 :         tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
    1102             :                               false, work_mem);
    1103             : 
    1104             :     /* build per-row values in the short-lived caller context */
    1105          14 :     MemoryContextSwitchTo(oldcontext);
    1106             : 
    1107          14 :     hash_seq_init(&hash_seq, PortalHashTable);
    1108          63 :     while ((hentry = hash_seq_search(&hash_seq)) != NULL)
    1109             :     {
    1110          35 :         Portal      portal = hentry->portal;
    1111             :         Datum       values[6];
    1112             :         bool        nulls[6];
    1113             : 
    1114             :         /* report only "visible" entries */
    1115          35 :         if (!portal->visible)
    1116          15 :             continue;
    1117             : 
    1118          20 :         MemSet(nulls, 0, sizeof(nulls));
    1119             : 
    1120          20 :         values[0] = CStringGetTextDatum(portal->name);
    1121          20 :         values[1] = CStringGetTextDatum(portal->sourceText);
    1122          20 :         values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
    1123          20 :         values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
    1124          20 :         values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
    1125          20 :         values[5] = TimestampTzGetDatum(portal->creation_time);
    1126             : 
    1127          20 :         tuplestore_putvalues(tupstore, tupdesc, values, nulls);
    1128             :     }
    1129             : 
    1130             :     /* clean up and return the tuplestore */
    1131             :     tuplestore_donestoring(tupstore);
    1132             : 
    1133          14 :     rsinfo->returnMode = SFRM_Materialize;
    1134          14 :     rsinfo->setResult = tupstore;
    1135          14 :     rsinfo->setDesc = tupdesc;
    1136             : 
    1137          14 :     return (Datum) 0;
    1138             : }
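
As the tupdesc comment notes, this function backs the pg_cursors system view
defined in system_views.sql (presumably as a thin SELECT over pg_cursor()),
so a query on pg_cursors ends up in the code above.  The tuplestore/
materialize skeleton it follows is the usual one for set-returning functions
of this style; a stripped-down, one-column version is sketched below with
hypothetical names, as a reference for the pattern rather than anything
taken from this file:

    #include "postgres.h"
    #include "fmgr.h"
    #include "funcapi.h"
    #include "miscadmin.h"
    #include "catalog/pg_type.h"
    #include "nodes/execnodes.h"
    #include "utils/builtins.h"
    #include "utils/tuplestore.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(demo_one_column_srf);

    /* Hypothetical SRF returning a single text column via a tuplestore. */
    Datum
    demo_one_column_srf(PG_FUNCTION_ARGS)
    {
        ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
        TupleDesc   tupdesc;
        Tuplestorestate *tupstore;
        MemoryContext oldcontext;
        Datum       values[1];
        bool        nulls[1] = {false};

        /* Same context/mode checks as pg_cursor(), reduced to essentials */
        if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo) ||
            !(rsinfo->allowedModes & SFRM_Materialize))
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("materialize mode required, but it is not allowed in this context")));

        /* Build the tupdesc and tuplestore in the per-query context */
        oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
        tupdesc = CreateTemplateTupleDesc(1, false);
        TupleDescInitEntry(tupdesc, (AttrNumber) 1, "value", TEXTOID, -1, 0);
        tupstore = tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
                                         false, work_mem);
        MemoryContextSwitchTo(oldcontext);

        /* Emit one demonstration row */
        values[0] = CStringGetTextDatum("example");
        tuplestore_putvalues(tupstore, tupdesc, values, nulls);

        rsinfo->returnMode = SFRM_Materialize;
        rsinfo->setResult = tupstore;
        rsinfo->setDesc = tupdesc;

        return (Datum) 0;
    }
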
    1139             : 
    1140             : bool
    1141           7 : ThereAreNoReadyPortals(void)
    1142             : {
    1143             :     HASH_SEQ_STATUS status;
    1144             :     PortalHashEnt *hentry;
    1145             : 
    1146           7 :     hash_seq_init(&status, PortalHashTable);
    1147             : 
    1148           7 :     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    1149             :     {
    1150           7 :         Portal      portal = hentry->portal;
    1151             : 
    1152           7 :         if (portal->status == PORTAL_READY)
    1153           0 :             return false;
    1154             :     }
    1155             : 
    1156           7 :     return true;
    1157             : }
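
ThereAreNoReadyPortals() is a guard predicate: it reports whether any portal
is still in PORTAL_READY state.  A hypothetical caller-side check, only to
show the intended shape of use (the backend performs a check of this shape
around transaction PREPARE, as far as I recall; the wrapper name and error
text here are invented):

    #include "postgres.h"
    #include "utils/portal.h"

    /*
     * Hypothetical guard: refuse to proceed while any cursor is still open,
     * i.e. while some portal remains PORTAL_READY.
     */
    static void
    check_no_ready_portals(void)
    {
        if (!ThereAreNoReadyPortals())
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("cannot proceed while cursors are open")));
    }
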

Generated by: LCOV version 1.11