LCOV - code coverage report
Current view:  top level - src/backend/executor - nodeModifyTable.c (source / functions)
Test:          PostgreSQL
Date:          2017-09-29 15:12:54
Coverage:      Lines:     588 of 681 hit (86.3 %)
               Functions:  15 of  16 hit (93.8 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeModifyTable.c
       4             :  *    routines to handle ModifyTable nodes.
       5             :  *
       6             :  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeModifyTable.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : /* INTERFACE ROUTINES
      16             :  *      ExecInitModifyTable - initialize the ModifyTable node
      17             :  *      ExecModifyTable     - retrieve the next tuple from the node
      18             :  *      ExecEndModifyTable  - shut down the ModifyTable node
      19             :  *      ExecReScanModifyTable - rescan the ModifyTable node
      20             :  *
      21             :  *   NOTES
      22             :  *      Each ModifyTable node contains a list of one or more subplans,
      23             :  *      much like an Append node.  There is one subplan per result relation.
      24             :  *      The key reason for this is that in an inherited UPDATE command, each
      25             :  *      result relation could have a different schema (more or different
      26             :  *      columns) requiring a different plan tree to produce it.  In an
      27             :  *      inherited DELETE, all the subplans should produce the same output
      28             :  *      rowtype, but we might still find that different plans are appropriate
      29             :  *      for different child relations.
      30             :  *
      31             :  *      If the query specifies RETURNING, then the ModifyTable returns a
      32             :  *      RETURNING tuple after completing each row insert, update, or delete.
      33             :  *      It must be called again to continue the operation.  Without RETURNING,
      34             :  *      we just loop within the node until all the work is done, then
      35             :  *      return NULL.  This avoids useless call/return overhead.
      36             :  */
      37             : 
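[Editor's note] The calling convention described in the NOTES above (one RETURNING tuple per call, versus a single call that loops internally and returns NULL when there is no RETURNING clause) can be pictured with a small standalone sketch. Everything below is a hypothetical stand-in written for illustration only, not the executor API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for a ModifyTable-like node and a result slot. */
typedef struct { int rows_left; bool has_returning; } DemoModifyNode;
typedef struct { int row_id; } DemoSlot;

/*
 * Without RETURNING, the first call does all the work and returns NULL;
 * with RETURNING, each call modifies one row and hands back a slot.
 */
static DemoSlot *
demo_exec_modify(DemoModifyNode *node, DemoSlot *out)
{
    if (!node->has_returning)
    {
        while (node->rows_left > 0)
            node->rows_left--;          /* loop inside the node */
        return NULL;
    }

    if (node->rows_left == 0)
        return NULL;                    /* all rows processed */

    out->row_id = node->rows_left--;
    return out;                         /* one RETURNING tuple per call */
}

int
main(void)
{
    DemoModifyNode node = {3, true};
    DemoSlot       slot;

    while (demo_exec_modify(&node, &slot) != NULL)
        printf("RETURNING row %d\n", slot.row_id);
    return 0;
}
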
      38             : #include "postgres.h"
      39             : 
      40             : #include "access/htup_details.h"
      41             : #include "access/xact.h"
      42             : #include "commands/trigger.h"
      43             : #include "executor/executor.h"
      44             : #include "executor/nodeModifyTable.h"
      45             : #include "foreign/fdwapi.h"
      46             : #include "miscadmin.h"
      47             : #include "nodes/nodeFuncs.h"
      48             : #include "parser/parsetree.h"
      49             : #include "storage/bufmgr.h"
      50             : #include "storage/lmgr.h"
      51             : #include "utils/builtins.h"
      52             : #include "utils/memutils.h"
      53             : #include "utils/rel.h"
      54             : #include "utils/tqual.h"
      55             : 
      56             : 
      57             : static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
      58             :                      ResultRelInfo *resultRelInfo,
      59             :                      ItemPointer conflictTid,
      60             :                      TupleTableSlot *planSlot,
      61             :                      TupleTableSlot *excludedSlot,
      62             :                      EState *estate,
      63             :                      bool canSetTag,
      64             :                      TupleTableSlot **returning);
      65             : 
      66             : /*
      67             :  * Verify that the tuples to be produced by INSERT or UPDATE match the
      68             :  * target relation's rowtype
      69             :  *
      70             :  * We do this to guard against stale plans.  If plan invalidation is
      71             :  * functioning properly then we should never get a failure here, but better
      72             :  * safe than sorry.  Note that this is called after we have obtained lock
      73             :  * on the target rel, so the rowtype can't change underneath us.
      74             :  *
      75             :  * The plan output is represented by its targetlist, because that makes
      76             :  * handling the dropped-column case easier.
      77             :  */
      78             : static void
      79        4267 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
      80             : {
      81        4267 :     TupleDesc   resultDesc = RelationGetDescr(resultRel);
      82        4267 :     int         attno = 0;
      83             :     ListCell   *lc;
      84             : 
      85       16044 :     foreach(lc, targetList)
      86             :     {
      87       11777 :         TargetEntry *tle = (TargetEntry *) lfirst(lc);
      88             :         Form_pg_attribute attr;
      89             : 
      90       11777 :         if (tle->resjunk)
      91         898 :             continue;           /* ignore junk tlist items */
      92             : 
      93       10879 :         if (attno >= resultDesc->natts)
      94           0 :             ereport(ERROR,
      95             :                     (errcode(ERRCODE_DATATYPE_MISMATCH),
      96             :                      errmsg("table row type and query-specified row type do not match"),
      97             :                      errdetail("Query has too many columns.")));
      98       10879 :         attr = TupleDescAttr(resultDesc, attno);
      99       10879 :         attno++;
     100             : 
     101       10879 :         if (!attr->attisdropped)
     102             :         {
     103             :             /* Normal case: demand type match */
     104       10845 :             if (exprType((Node *) tle->expr) != attr->atttypid)
     105           0 :                 ereport(ERROR,
     106             :                         (errcode(ERRCODE_DATATYPE_MISMATCH),
     107             :                          errmsg("table row type and query-specified row type do not match"),
     108             :                          errdetail("Table has type %s at ordinal position %d, but query expects %s.",
     109             :                                    format_type_be(attr->atttypid),
     110             :                                    attno,
     111             :                                    format_type_be(exprType((Node *) tle->expr)))));
     112             :         }
     113             :         else
     114             :         {
     115             :             /*
     116             :              * For a dropped column, we can't check atttypid (it's likely 0).
     117             :              * In any case the planner has most likely inserted an INT4 null.
     118             :              * What we insist on is just *some* NULL constant.
     119             :              */
     120          68 :             if (!IsA(tle->expr, Const) ||
     121          34 :                 !((Const *) tle->expr)->constisnull)
     122           0 :                 ereport(ERROR,
     123             :                         (errcode(ERRCODE_DATATYPE_MISMATCH),
     124             :                          errmsg("table row type and query-specified row type do not match"),
     125             :                          errdetail("Query provides a value for a dropped column at ordinal position %d.",
     126             :                                    attno)));
     127             :         }
     128             :     }
     129        4267 :     if (attno != resultDesc->natts)
     130           0 :         ereport(ERROR,
     131             :                 (errcode(ERRCODE_DATATYPE_MISMATCH),
     132             :                  errmsg("table row type and query-specified row type do not match"),
     133             :                  errdetail("Query has too few columns.")));
     134        4267 : }
     135             : 
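[Editor's note] As a rough standalone illustration of the validation rules ExecCheckPlanOutput applies above (skip junk targetlist entries, demand a type match for live columns, require a NULL constant for dropped columns, and insist that the column counts agree), the sketch below uses simplified stand-in structs rather than the backend's TupleDesc and TargetEntry.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for attribute and targetlist-entry metadata. */
typedef struct { int atttypid; bool attisdropped; } DemoAttr;
typedef struct { int exprtype; bool resjunk; bool is_null_const; } DemoTle;

/* Returns true when the "plan output" matches the "table row type". */
static bool
demo_check_plan_output(const DemoAttr *attrs, int natts,
                       const DemoTle *tlist, int ntlist)
{
    int attno = 0;

    for (int i = 0; i < ntlist; i++)
    {
        if (tlist[i].resjunk)
            continue;                       /* ignore junk tlist items */
        if (attno >= natts)
            return false;                   /* query has too many columns */
        if (!attrs[attno].attisdropped)
        {
            if (tlist[i].exprtype != attrs[attno].atttypid)
                return false;               /* type mismatch */
        }
        else if (!tlist[i].is_null_const)
            return false;                   /* dropped column must get NULL */
        attno++;
    }
    return attno == natts;                  /* otherwise too few columns */
}

int
main(void)
{
    DemoAttr attrs[] = {{23, false}, {0, true}};    /* one live, one dropped */
    DemoTle  tlist[] = {{23, false, false}, {0, false, true}};

    printf("match: %d\n", demo_check_plan_output(attrs, 2, tlist, 2));
    return 0;
}
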
     136             : /*
     137             :  * ExecProcessReturning --- evaluate a RETURNING list
     138             :  *
     139             :  * projectReturning: RETURNING projection info for current result rel
     140             :  * tupleSlot: slot holding tuple actually inserted/updated/deleted
     141             :  * planSlot: slot holding tuple returned by top subplan node
     142             :  *
     143             :  * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
     144             :  * scan tuple.
     145             :  *
     146             :  * Returns a slot holding the result tuple
     147             :  */
     148             : static TupleTableSlot *
     149         526 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
     150             :                      TupleTableSlot *tupleSlot,
     151             :                      TupleTableSlot *planSlot)
     152             : {
     153         526 :     ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
     154         526 :     ExprContext *econtext = projectReturning->pi_exprContext;
     155             : 
     156             :     /*
     157             :      * Reset per-tuple memory context to free any expression evaluation
     158             :      * storage allocated in the previous cycle.
     159             :      */
     160         526 :     ResetExprContext(econtext);
     161             : 
     162             :     /* Make tuple and any needed join variables available to ExecProject */
     163         526 :     if (tupleSlot)
     164         526 :         econtext->ecxt_scantuple = tupleSlot;
     165             :     else
     166             :     {
     167             :         HeapTuple   tuple;
     168             : 
     169             :         /*
     170             :          * RETURNING expressions might reference the tableoid column, so
     171             :          * initialize t_tableOid before evaluating them.
     172             :          */
     173           0 :         Assert(!TupIsNull(econtext->ecxt_scantuple));
     174           0 :         tuple = ExecMaterializeSlot(econtext->ecxt_scantuple);
     175           0 :         tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
     176             :     }
     177         526 :     econtext->ecxt_outertuple = planSlot;
     178             : 
     179             :     /* Compute the RETURNING expressions */
     180         526 :     return ExecProject(projectReturning);
     181             : }
     182             : 
     183             : /*
     184             :  * ExecCheckHeapTupleVisible -- verify heap tuple is visible
     185             :  *
     186             :  * It would not be consistent with guarantees of the higher isolation levels to
     187             :  * proceed with avoiding insertion (taking speculative insertion's alternative
     188             :  * path) on the basis of another tuple that is not visible to MVCC snapshot.
     189             :  * Check for the need to raise a serialization failure, and do so as necessary.
     190             :  */
     191             : static void
     192          73 : ExecCheckHeapTupleVisible(EState *estate,
     193             :                           HeapTuple tuple,
     194             :                           Buffer buffer)
     195             : {
     196          73 :     if (!IsolationUsesXactSnapshot())
     197         144 :         return;
     198             : 
     199             :     /*
     200             :      * We need buffer pin and lock to call HeapTupleSatisfiesVisibility.
     201             :      * Caller should be holding pin, but not lock.
     202             :      */
     203           2 :     LockBuffer(buffer, BUFFER_LOCK_SHARE);
     204           2 :     if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer))
     205             :     {
     206             :         /*
     207             :          * We should not raise a serialization failure if the conflict is
     208             :          * against a tuple inserted by our own transaction, even if it's not
     209             :          * visible to our snapshot.  (This would happen, for example, if
     210             :          * conflicting keys are proposed for insertion in a single command.)
     211             :          */
     212           2 :         if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
     213           0 :             ereport(ERROR,
     214             :                     (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
     215             :                      errmsg("could not serialize access due to concurrent update")));
     216             :     }
     217           2 :     LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
     218             : }
     219             : 
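[Editor's note] The rule the function above enforces can be summarized: under REPEATABLE READ or SERIALIZABLE, it is not acceptable to take the ON CONFLICT path on the strength of a conflicting row the snapshot cannot see, unless that row was inserted by the current transaction. A hedged standalone sketch of that decision, with stub predicates in place of the real snapshot and transaction checks:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stubs standing in for snapshot/transaction tests. */
static bool isolation_uses_xact_snapshot(void)      { return true; }
static bool visible_to_snapshot(unsigned xmin)      { (void) xmin; return false; }
static bool inserted_by_current_xact(unsigned xmin) { (void) xmin; return true; }

/* Raise a "serialization failure" unless the conflict row is acceptable. */
static void
demo_check_conflict_tuple_visible(unsigned tuple_xmin)
{
    if (!isolation_uses_xact_snapshot())
        return;                     /* READ COMMITTED: nothing to enforce */

    if (!visible_to_snapshot(tuple_xmin) &&
        !inserted_by_current_xact(tuple_xmin))
    {
        fprintf(stderr, "could not serialize access due to concurrent update\n");
        exit(EXIT_FAILURE);
    }
}

int
main(void)
{
    demo_check_conflict_tuple_visible(1234);
    puts("conflict tuple acceptable");
    return 0;
}
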
     220             : /*
     221             :  * ExecCheckTIDVisible -- convenience variant of ExecCheckHeapTupleVisible()
     222             :  */
     223             : static void
     224          13 : ExecCheckTIDVisible(EState *estate,
     225             :                     ResultRelInfo *relinfo,
     226             :                     ItemPointer tid)
     227             : {
     228          13 :     Relation    rel = relinfo->ri_RelationDesc;
     229             :     Buffer      buffer;
     230             :     HeapTupleData tuple;
     231             : 
     232             :     /* Redundantly check isolation level */
     233          13 :     if (!IsolationUsesXactSnapshot())
     234          24 :         return;
     235             : 
     236           2 :     tuple.t_self = *tid;
     237           2 :     if (!heap_fetch(rel, SnapshotAny, &tuple, &buffer, false, NULL))
     238           0 :         elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
     239           2 :     ExecCheckHeapTupleVisible(estate, &tuple, buffer);
     240           2 :     ReleaseBuffer(buffer);
     241             : }
     242             : 
     243             : /* ----------------------------------------------------------------
     244             :  *      ExecInsert
     245             :  *
     246             :  *      For INSERT, we have to insert the tuple into the target relation
     247             :  *      and insert appropriate tuples into the index relations.
     248             :  *
     249             :  *      Returns RETURNING result if any, otherwise NULL.
     250             :  * ----------------------------------------------------------------
     251             :  */
     252             : static TupleTableSlot *
     253      472314 : ExecInsert(ModifyTableState *mtstate,
     254             :            TupleTableSlot *slot,
     255             :            TupleTableSlot *planSlot,
     256             :            List *arbiterIndexes,
     257             :            OnConflictAction onconflict,
     258             :            EState *estate,
     259             :            bool canSetTag)
     260             : {
     261             :     HeapTuple   tuple;
     262             :     ResultRelInfo *resultRelInfo;
     263      472314 :     ResultRelInfo *saved_resultRelInfo = NULL;
     264             :     Relation    resultRelationDesc;
     265             :     Oid         newId;
     266      472314 :     List       *recheckIndexes = NIL;
     267      472314 :     TupleTableSlot *result = NULL;
     268             : 
     269             :     /*
     270             :      * get the heap tuple out of the tuple table slot, making sure we have a
     271             :      * writable copy
     272             :      */
     273      472314 :     tuple = ExecMaterializeSlot(slot);
     274             : 
     275             :     /*
     276             :      * get information on the (current) result relation
     277             :      */
     278      472314 :     resultRelInfo = estate->es_result_relation_info;
     279             : 
     280             :     /* Determine the partition to heap_insert the tuple into */
     281      472314 :     if (mtstate->mt_partition_dispatch_info)
     282             :     {
     283             :         int         leaf_part_index;
     284             :         TupleConversionMap *map;
     285             : 
     286             :         /*
     287             :          * Away we go ... If we end up not finding a partition after all,
     288             :          * ExecFindPartition() does not return and errors out instead.
     289             :          * Otherwise, the returned value is to be used as an index into arrays
     290             :          * mt_partitions[] and mt_partition_tupconv_maps[] that will get us
     291             :          * the ResultRelInfo and TupleConversionMap for the partition,
     292             :          * respectively.
     293             :          */
     294         167 :         leaf_part_index = ExecFindPartition(resultRelInfo,
     295         167 :                                             mtstate->mt_partition_dispatch_info,
     296             :                                             slot,
     297             :                                             estate);
     298         155 :         Assert(leaf_part_index >= 0 &&
     299             :                leaf_part_index < mtstate->mt_num_partitions);
     300             : 
     301             :         /*
     302             :          * Save the old ResultRelInfo and switch to the one corresponding to
     303             :          * the selected partition.
     304             :          */
     305         155 :         saved_resultRelInfo = resultRelInfo;
     306         155 :         resultRelInfo = mtstate->mt_partitions + leaf_part_index;
     307             : 
     308             :         /* We do not yet have a way to insert into a foreign partition */
     309         155 :         if (resultRelInfo->ri_FdwRoutine)
     310           0 :             ereport(ERROR,
     311             :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
     312             :                      errmsg("cannot route inserted tuples to a foreign table")));
     313             : 
     314             :         /* For ExecInsertIndexTuples() to work on the partition's indexes */
     315         155 :         estate->es_result_relation_info = resultRelInfo;
     316             : 
     317             :         /*
     318             :          * If we're capturing transition tuples, we might need to convert from
     319             :          * the partition rowtype to parent rowtype.
     320             :          */
     321         155 :         if (mtstate->mt_transition_capture != NULL)
     322             :         {
     323          10 :             if (resultRelInfo->ri_TrigDesc &&
     324           7 :                 (resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
     325           3 :                  resultRelInfo->ri_TrigDesc->trig_insert_instead_row))
     326             :             {
     327             :                 /*
     328             :                  * If there are any BEFORE or INSTEAD triggers on the
     329             :                  * partition, we'll have to be ready to convert their result
     330             :                  * back to tuplestore format.
     331             :                  */
     332           1 :                 mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
     333           2 :                 mtstate->mt_transition_capture->tcs_map =
     334           1 :                     mtstate->mt_transition_tupconv_maps[leaf_part_index];
     335             :             }
     336             :             else
     337             :             {
     338             :                 /*
     339             :                  * Otherwise, just remember the original unconverted tuple, to
     340             :                  * avoid a needless round trip conversion.
     341             :                  */
     342           5 :                 mtstate->mt_transition_capture->tcs_original_insert_tuple = tuple;
     343           5 :                 mtstate->mt_transition_capture->tcs_map = NULL;
     344             :             }
     345             :         }
     346             : 
     347             :         /*
     348             :          * We might need to convert from the parent rowtype to the partition
     349             :          * rowtype.
     350             :          */
     351         155 :         map = mtstate->mt_partition_tupconv_maps[leaf_part_index];
     352         155 :         if (map)
     353             :         {
     354          43 :             Relation    partrel = resultRelInfo->ri_RelationDesc;
     355             : 
     356          43 :             tuple = do_convert_tuple(tuple, map);
     357             : 
     358             :             /*
     359             :              * We must use the partition's tuple descriptor from this point
     360             :              * on, until we're finished dealing with the partition. Use the
     361             :              * dedicated slot for that.
     362             :              */
     363          43 :             slot = mtstate->mt_partition_tuple_slot;
     364          43 :             Assert(slot != NULL);
     365          43 :             ExecSetSlotDescriptor(slot, RelationGetDescr(partrel));
     366          43 :             ExecStoreTuple(tuple, slot, InvalidBuffer, true);
     367             :         }
     368             :     }
     369             : 
     370      472302 :     resultRelationDesc = resultRelInfo->ri_RelationDesc;
     371             : 
     372             :     /*
     373             :      * If the result relation has OIDs, force the tuple's OID to zero so that
     374             :      * heap_insert will assign a fresh OID.  Usually the OID already will be
     375             :      * zero at this point, but there are corner cases where the plan tree can
     376             :      * return a tuple extracted literally from some table with the same
     377             :      * rowtype.
     378             :      *
     379             :      * XXX if we ever wanted to allow users to assign their own OIDs to new
     380             :      * rows, this'd be the place to do it.  For the moment, we make a point of
     381             :      * doing this before calling triggers, so that a user-supplied trigger
     382             :      * could hack the OID if desired.
     383             :      */
     384      472302 :     if (resultRelationDesc->rd_rel->relhasoids)
     385        9158 :         HeapTupleSetOid(tuple, InvalidOid);
     386             : 
     387             :     /*
     388             :      * BEFORE ROW INSERT Triggers.
     389             :      *
     390             :      * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
     391             :      * INSERT ... ON CONFLICT statement.  We cannot check for constraint
     392             :      * violations before firing these triggers, because they can change the
     393             :      * values to insert.  Also, they can run arbitrary user-defined code with
     394             :      * side-effects that we can't cancel by just not inserting the tuple.
     395             :      */
     396      484026 :     if (resultRelInfo->ri_TrigDesc &&
     397       11724 :         resultRelInfo->ri_TrigDesc->trig_insert_before_row)
     398             :     {
     399         214 :         slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
     400             : 
     401         202 :         if (slot == NULL)       /* "do nothing" */
     402           0 :             return NULL;
     403             : 
     404             :         /* trigger might have changed tuple */
     405         202 :         tuple = ExecMaterializeSlot(slot);
     406             :     }
     407             : 
     408             :     /* INSTEAD OF ROW INSERT Triggers */
     409      484002 :     if (resultRelInfo->ri_TrigDesc &&
     410       11712 :         resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
     411             :     {
     412          14 :         slot = ExecIRInsertTriggers(estate, resultRelInfo, slot);
     413             : 
     414          14 :         if (slot == NULL)       /* "do nothing" */
     415           1 :             return NULL;
     416             : 
     417             :         /* trigger might have changed tuple */
     418          13 :         tuple = ExecMaterializeSlot(slot);
     419             : 
     420          13 :         newId = InvalidOid;
     421             :     }
     422      472276 :     else if (resultRelInfo->ri_FdwRoutine)
     423             :     {
     424             :         /*
     425             :          * insert into foreign table: let the FDW do it
     426             :          */
     427           0 :         slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
     428             :                                                                resultRelInfo,
     429             :                                                                slot,
     430             :                                                                planSlot);
     431             : 
     432           0 :         if (slot == NULL)       /* "do nothing" */
     433           0 :             return NULL;
     434             : 
     435             :         /* FDW might have changed tuple */
     436           0 :         tuple = ExecMaterializeSlot(slot);
     437             : 
     438             :         /*
     439             :          * AFTER ROW Triggers or RETURNING expressions might reference the
     440             :          * tableoid column, so initialize t_tableOid before evaluating them.
     441             :          */
     442           0 :         tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
     443             : 
     444           0 :         newId = InvalidOid;
     445             :     }
     446             :     else
     447             :     {
     448             :         /*
     449             :          * We always check the partition constraint, including when the tuple
     450             :          * got here via tuple-routing.  However we don't need to in the latter
     451             :          * case if no BR trigger is defined on the partition.  Note that a BR
     452             :          * trigger might modify the tuple such that the partition constraint
     453             :          * is no longer satisfied, so we need to check in that case.
     454             :          */
     455      472276 :         bool        check_partition_constr =
     456      472276 :         (resultRelInfo->ri_PartitionCheck != NIL);
     457             : 
     458             :         /*
     459             :          * Constraints might reference the tableoid column, so initialize
     460             :          * t_tableOid before evaluating them.
     461             :          */
     462      472276 :         tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
     463             : 
     464             :         /*
     465             :          * Check any RLS INSERT WITH CHECK policies
     466             :          *
     467             :          * ExecWithCheckOptions() will skip any WCOs which are not of the kind
     468             :          * we are looking for at this point.
     469             :          */
     470      472276 :         if (resultRelInfo->ri_WithCheckOptions != NIL)
     471          63 :             ExecWithCheckOptions(WCO_RLS_INSERT_CHECK,
     472             :                                  resultRelInfo, slot, estate);
     473             : 
     474             :         /*
     475             :          * No need though if the tuple has been routed, and a BR trigger
     476             :          * doesn't exist.
     477             :          */
     478      472409 :         if (saved_resultRelInfo != NULL &&
     479         160 :             !(resultRelInfo->ri_TrigDesc &&
     480           9 :               resultRelInfo->ri_TrigDesc->trig_insert_before_row))
     481         145 :             check_partition_constr = false;
     482             : 
     483             :         /* Check the constraints of the tuple */
     484      472258 :         if (resultRelationDesc->rd_att->constr || check_partition_constr)
     485       35083 :             ExecConstraints(resultRelInfo, slot, estate);
     486             : 
     487      472172 :         if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
     488          54 :         {
     489             :             /* Perform a speculative insertion. */
     490             :             uint32      specToken;
     491             :             ItemPointerData conflictTid;
     492             :             bool        specConflict;
     493             : 
     494             :             /*
     495             :              * Do a non-conclusive check for conflicts first.
     496             :              *
     497             :              * We're not holding any locks yet, so this doesn't guarantee that
     498             :              * the later insert won't conflict.  But it avoids leaving behind
     499             :              * a lot of canceled speculative insertions, if you run a lot of
     500             :              * INSERT ON CONFLICT statements that do conflict.
     501             :              *
     502             :              * We loop back here if we find a conflict below, either during
     503             :              * the pre-check, or when we re-check after inserting the tuple
     504             :              * speculatively.
     505             :              */
     506             :     vlock:
     507         144 :             specConflict = false;
     508         144 :             if (!ExecCheckIndexConstraints(slot, estate, &conflictTid,
     509             :                                            arbiterIndexes))
     510             :             {
     511             :                 /* committed conflict tuple found */
     512          88 :                 if (onconflict == ONCONFLICT_UPDATE)
     513             :                 {
     514             :                     /*
     515             :                      * In case of ON CONFLICT DO UPDATE, execute the UPDATE
     516             :                      * part.  Be prepared to retry if the UPDATE fails because
     517             :                      * of another concurrent UPDATE/DELETE to the conflict
     518             :                      * tuple.
     519             :                      */
     520          75 :                     TupleTableSlot *returning = NULL;
     521             : 
     522          75 :                     if (ExecOnConflictUpdate(mtstate, resultRelInfo,
     523             :                                              &conflictTid, planSlot, slot,
     524             :                                              estate, canSetTag, &returning))
     525             :                     {
     526          65 :                         InstrCountFiltered2(&mtstate->ps, 1);
     527          65 :                         return returning;
     528             :                     }
     529             :                     else
     530           0 :                         goto vlock;
     531             :                 }
     532             :                 else
     533             :                 {
     534             :                     /*
     535             :                      * In case of ON CONFLICT DO NOTHING, do nothing. However,
     536             :                      * verify that the tuple is visible to the executor's MVCC
     537             :                      * snapshot at higher isolation levels.
     538             :                      */
     539          13 :                     Assert(onconflict == ONCONFLICT_NOTHING);
     540          13 :                     ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid);
     541          13 :                     InstrCountFiltered2(&mtstate->ps, 1);
     542          13 :                     return NULL;
     543             :                 }
     544             :             }
     545             : 
     546             :             /*
     547             :              * Before we start insertion proper, acquire our "speculative
     548             :              * insertion lock".  Others can use that to wait for us to decide
     549             :              * if we're going to go ahead with the insertion, instead of
     550             :              * waiting for the whole transaction to complete.
     551             :              */
     552          55 :             specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
     553          55 :             HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
     554             : 
     555             :             /* insert the tuple, with the speculative token */
     556          55 :             newId = heap_insert(resultRelationDesc, tuple,
     557             :                                 estate->es_output_cid,
     558             :                                 HEAP_INSERT_SPECULATIVE,
     559             :                                 NULL);
     560             : 
     561             :             /* insert index entries for tuple */
     562          55 :             recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
     563             :                                                    estate, true, &specConflict,
     564             :                                                    arbiterIndexes);
     565             : 
     566             :             /* adjust the tuple's state accordingly */
     567          54 :             if (!specConflict)
     568          54 :                 heap_finish_speculative(resultRelationDesc, tuple);
     569             :             else
     570           0 :                 heap_abort_speculative(resultRelationDesc, tuple);
     571             : 
     572             :             /*
     573             :              * Wake up anyone waiting for our decision.  They will re-check
     574             :              * the tuple, see that it's no longer speculative, and wait on our
     575             :              * XID as if this was a regularly inserted tuple all along.  Or if
     576             :              * we killed the tuple, they will see it's dead, and proceed as if
     577             :              * the tuple never existed.
     578             :              */
     579          54 :             SpeculativeInsertionLockRelease(GetCurrentTransactionId());
     580             : 
     581             :             /*
     582             :              * If there was a conflict, start from the beginning.  We'll do
     583             :              * the pre-check again, which will now find the conflicting tuple
     584             :              * (unless it aborts before we get there).
     585             :              */
     586          54 :             if (specConflict)
     587             :             {
     588           0 :                 list_free(recheckIndexes);
     589           0 :                 goto vlock;
     590             :             }
     591             : 
     592             :             /* Since there was no insertion conflict, we're done */
     593             :         }
     594             :         else
     595             :         {
     596             :             /*
     597             :              * insert the tuple normally.
     598             :              *
     599             :              * Note: heap_insert returns the tid (location) of the new tuple
     600             :              * in the t_self field.
     601             :              */
     602      472028 :             newId = heap_insert(resultRelationDesc, tuple,
     603             :                                 estate->es_output_cid,
     604             :                                 0, NULL);
     605             : 
     606             :             /* insert index entries for tuple */
     607      472027 :             if (resultRelInfo->ri_NumIndices > 0)
     608      163155 :                 recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
     609             :                                                        estate, false, NULL,
     610             :                                                        arbiterIndexes);
     611             :         }
     612             :     }
     613             : 
     614      472065 :     if (canSetTag)
     615             :     {
     616      471896 :         (estate->es_processed)++;
     617      471896 :         estate->es_lastoid = newId;
     618      471896 :         setLastTid(&(tuple->t_self));
     619             :     }
     620             : 
     621             :     /* AFTER ROW INSERT Triggers */
     622      472065 :     ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes,
     623      472065 :                          mtstate->mt_transition_capture);
     624             : 
     625      472065 :     list_free(recheckIndexes);
     626             : 
     627             :     /*
     628             :      * Check any WITH CHECK OPTION constraints from parent views.  We are
     629             :      * required to do this after testing all constraints and uniqueness
     630             :      * violations per the SQL spec, so we do it after actually inserting the
     631             :      * record into the heap and all indexes.
     632             :      *
     633             :      * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
     634             :      * tuple will never be seen, if it violates the WITH CHECK OPTION.
     635             :      *
     636             :      * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
     637             :      * are looking for at this point.
     638             :      */
     639      472065 :     if (resultRelInfo->ri_WithCheckOptions != NIL)
     640          41 :         ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
     641             : 
     642             :     /* Process RETURNING if present */
     643      472045 :     if (resultRelInfo->ri_projectReturning)
     644         250 :         result = ExecProcessReturning(resultRelInfo, slot, planSlot);
     645             : 
     646      472045 :     if (saved_resultRelInfo)
     647         143 :         estate->es_result_relation_info = saved_resultRelInfo;
     648             : 
     649      472045 :     return result;
     650             : }
     651             : 
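[Editor's note] The ON CONFLICT branch of ExecInsert above is, at its core, a retry loop around a speculative insertion: pre-check the arbiter indexes, insert the tuple with a speculative token, detect conflicts while inserting index entries, and then either finish the insertion or abort it and start over. The standalone sketch below compresses that control flow; every helper is a hypothetical stub, not a backend routine.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs standing in for the heap/index operations. */
static bool precheck_finds_conflict(void)  { return false; }
static bool run_conflict_action(void)      { return true; }  /* DO UPDATE / DO NOTHING */
static void insert_with_spec_token(void)   { }
static bool index_insert_conflicted(void)  { return false; }
static void finish_speculative(void)       { }
static void abort_speculative(void)        { }

/* Loop until the row is inserted or handed off to the conflict action. */
static void
demo_insert_on_conflict(void)
{
    for (;;)
    {
        if (precheck_finds_conflict())
        {
            if (run_conflict_action())
                return;             /* DO UPDATE / DO NOTHING succeeded */
            continue;               /* conflict row changed under us: retry */
        }

        insert_with_spec_token();   /* speculative heap insertion */

        if (!index_insert_conflicted())
        {
            finish_speculative();   /* no conflict: make the tuple permanent */
            return;
        }

        abort_speculative();        /* kill our tuple ... */
        /* ... and loop back; the pre-check will now see the other tuple */
    }
}

int
main(void)
{
    demo_insert_on_conflict();
    puts("row inserted");
    return 0;
}
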
     652             : /* ----------------------------------------------------------------
     653             :  *      ExecDelete
     654             :  *
     655             :  *      DELETE is like UPDATE, except that we delete the tuple and no
     656             :  *      index modifications are needed.
     657             :  *
     658             :  *      When deleting from a table, tupleid identifies the tuple to
     659             :  *      delete and oldtuple is NULL.  When deleting from a view,
     660             :  *      oldtuple is passed to the INSTEAD OF triggers and identifies
     661             :  *      what to delete, and tupleid is invalid.  When deleting from a
     662             :  *      foreign table, tupleid is invalid; the FDW has to figure out
     663             :  *      which row to delete using data from the planSlot.  oldtuple is
     664             :  *      passed to foreign table triggers; it is NULL when the foreign
     665             :  *      table has no relevant triggers.
     666             :  *
     667             :  *      Returns RETURNING result if any, otherwise NULL.
     668             :  * ----------------------------------------------------------------
     669             :  */
     670             : static TupleTableSlot *
     671       65741 : ExecDelete(ModifyTableState *mtstate,
     672             :            ItemPointer tupleid,
     673             :            HeapTuple oldtuple,
     674             :            TupleTableSlot *planSlot,
     675             :            EPQState *epqstate,
     676             :            EState *estate,
     677             :            bool canSetTag)
     678             : {
     679             :     ResultRelInfo *resultRelInfo;
     680             :     Relation    resultRelationDesc;
     681             :     HTSU_Result result;
     682             :     HeapUpdateFailureData hufd;
     683       65741 :     TupleTableSlot *slot = NULL;
     684             : 
     685             :     /*
     686             :      * get information on the (current) result relation
     687             :      */
     688       65741 :     resultRelInfo = estate->es_result_relation_info;
     689       65741 :     resultRelationDesc = resultRelInfo->ri_RelationDesc;
     690             : 
     691             :     /* BEFORE ROW DELETE Triggers */
     692       66676 :     if (resultRelInfo->ri_TrigDesc &&
     693         935 :         resultRelInfo->ri_TrigDesc->trig_delete_before_row)
     694             :     {
     695             :         bool        dodelete;
     696             : 
     697          21 :         dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
     698             :                                         tupleid, oldtuple);
     699             : 
     700          16 :         if (!dodelete)          /* "do nothing" */
     701           1 :             return NULL;
     702             :     }
     703             : 
     704             :     /* INSTEAD OF ROW DELETE Triggers */
     705       66664 :     if (resultRelInfo->ri_TrigDesc &&
     706         929 :         resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
     707           7 :     {
     708             :         bool        dodelete;
     709             : 
     710           8 :         Assert(oldtuple != NULL);
     711           8 :         dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
     712             : 
     713           8 :         if (!dodelete)          /* "do nothing" */
     714           1 :             return NULL;
     715             :     }
     716       65727 :     else if (resultRelInfo->ri_FdwRoutine)
     717             :     {
     718             :         HeapTuple   tuple;
     719             : 
     720             :         /*
     721             :          * delete from foreign table: let the FDW do it
     722             :          *
     723             :          * We offer the trigger tuple slot as a place to store RETURNING data,
     724             :          * although the FDW can return some other slot if it wants.  Set up
     725             :          * the slot's tupdesc so the FDW doesn't need to do that for itself.
     726             :          */
     727           0 :         slot = estate->es_trig_tuple_slot;
     728           0 :         if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
     729           0 :             ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
     730             : 
     731           0 :         slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
     732             :                                                                resultRelInfo,
     733             :                                                                slot,
     734             :                                                                planSlot);
     735             : 
     736           0 :         if (slot == NULL)       /* "do nothing" */
     737           0 :             return NULL;
     738             : 
     739             :         /*
     740             :          * RETURNING expressions might reference the tableoid column, so
     741             :          * initialize t_tableOid before evaluating them.
     742             :          */
     743           0 :         if (slot->tts_isempty)
     744           0 :             ExecStoreAllNullTuple(slot);
     745           0 :         tuple = ExecMaterializeSlot(slot);
     746           0 :         tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
     747             :     }
     748             :     else
     749             :     {
     750             :         /*
     751             :          * delete the tuple
     752             :          *
     753             :          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
     754             :          * that the row to be deleted is visible to that snapshot, and throw a
     755             :          * can't-serialize error if not. This is a special-case behavior
     756             :          * needed for referential integrity updates in transaction-snapshot
     757             :          * mode transactions.
     758             :          */
     759             : ldelete:;
     760       65727 :         result = heap_delete(resultRelationDesc, tupleid,
     761             :                              estate->es_output_cid,
     762             :                              estate->es_crosscheck_snapshot,
     763             :                              true /* wait for commit */ ,
     764             :                              &hufd);
     765       65727 :         switch (result)
     766             :         {
     767             :             case HeapTupleSelfUpdated:
     768             : 
     769             :                 /*
     770             :                  * The target tuple was already updated or deleted by the
     771             :                  * current command, or by a later command in the current
     772             :                  * transaction.  The former case is possible in a join DELETE
     773             :                  * where multiple tuples join to the same target tuple. This
     774             :                  * is somewhat questionable, but Postgres has always allowed
     775             :                  * it: we just ignore additional deletion attempts.
     776             :                  *
     777             :                  * The latter case arises if the tuple is modified by a
     778             :                  * command in a BEFORE trigger, or perhaps by a command in a
     779             :                  * volatile function used in the query.  In such situations we
     780             :                  * should not ignore the deletion, but it is equally unsafe to
     781             :                  * proceed.  We don't want to discard the original DELETE
     782             :                  * while keeping the triggered actions based on its deletion;
     783             :                  * and it would be no better to allow the original DELETE
     784             :                  * while discarding updates that it triggered.  The row update
     785             :                  * carries some information that might be important according
     786             :                  * to business rules; so throwing an error is the only safe
     787             :                  * course.
     788             :                  *
     789             :                  * If a trigger actually intends this type of interaction, it
     790             :                  * can re-execute the DELETE and then return NULL to cancel
     791             :                  * the outer delete.
     792             :                  */
     793           1 :                 if (hufd.cmax != estate->es_output_cid)
     794           1 :                     ereport(ERROR,
     795             :                             (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
     796             :                              errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
     797             :                              errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
     798             : 
     799             :                 /* Else, already deleted by self; nothing to do */
     800           0 :                 return NULL;
     801             : 
     802             :             case HeapTupleMayBeUpdated:
     803       65726 :                 break;
     804             : 
     805             :             case HeapTupleUpdated:
     806           0 :                 if (IsolationUsesXactSnapshot())
     807           0 :                     ereport(ERROR,
     808             :                             (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
     809             :                              errmsg("could not serialize access due to concurrent update")));
     810           0 :                 if (!ItemPointerEquals(tupleid, &hufd.ctid))
     811             :                 {
     812             :                     TupleTableSlot *epqslot;
     813             : 
     814           0 :                     epqslot = EvalPlanQual(estate,
     815             :                                            epqstate,
     816             :                                            resultRelationDesc,
     817             :                                            resultRelInfo->ri_RangeTableIndex,
     818             :                                            LockTupleExclusive,
     819             :                                            &hufd.ctid,
     820             :                                            hufd.xmax);
     821           0 :                     if (!TupIsNull(epqslot))
     822             :                     {
     823           0 :                         *tupleid = hufd.ctid;
     824           0 :                         goto ldelete;
     825             :                     }
     826             :                 }
     827             :                 /* tuple already deleted; nothing to do */
     828           0 :                 return NULL;
     829             : 
     830             :             default:
     831           0 :                 elog(ERROR, "unrecognized heap_delete status: %u", result);
     832             :                 return NULL;
     833             :         }
     834             : 
     835             :         /*
     836             :          * Note: Normally one would think that we have to delete index tuples
     837             :          * associated with the heap tuple now...
     838             :          *
     839             :          * ... but in POSTGRES, we have no need to do this because VACUUM will
     840             :          * take care of it later.  We can't delete index tuples immediately
     841             :          * anyway, since the tuple is still visible to other transactions.
     842             :          */
     843             :     }
     844             : 
     845       65733 :     if (canSetTag)
     846       65699 :         (estate->es_processed)++;
     847             : 
     848             :     /* AFTER ROW DELETE Triggers */
     849       65733 :     ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
     850       65733 :                          mtstate->mt_transition_capture);
     851             : 
     852             :     /* Process RETURNING if present */
     853       65733 :     if (resultRelInfo->ri_projectReturning)
     854             :     {
     855             :         /*
     856             :          * We have to put the target tuple into a slot, which means first we
     857             :          * gotta fetch it.  We can use the trigger tuple slot.
     858             :          */
     859             :         TupleTableSlot *rslot;
     860             :         HeapTupleData deltuple;
     861             :         Buffer      delbuffer;
     862             : 
     863          70 :         if (resultRelInfo->ri_FdwRoutine)
     864             :         {
     865             :             /* FDW must have provided a slot containing the deleted row */
     866           0 :             Assert(!TupIsNull(slot));
     867           0 :             delbuffer = InvalidBuffer;
     868             :         }
     869             :         else
     870             :         {
     871          70 :             slot = estate->es_trig_tuple_slot;
     872          70 :             if (oldtuple != NULL)
     873             :             {
     874           4 :                 deltuple = *oldtuple;
     875           4 :                 delbuffer = InvalidBuffer;
     876             :             }
     877             :             else
     878             :             {
     879          66 :                 deltuple.t_self = *tupleid;
     880          66 :                 if (!heap_fetch(resultRelationDesc, SnapshotAny,
     881             :                                 &deltuple, &delbuffer, false, NULL))
     882           0 :                     elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
     883             :             }
     884             : 
     885          70 :             if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc))
     886          25 :                 ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc));
     887          70 :             ExecStoreTuple(&deltuple, slot, InvalidBuffer, false);
     888             :         }
     889             : 
     890          70 :         rslot = ExecProcessReturning(resultRelInfo, slot, planSlot);
     891             : 
     892             :         /*
     893             :          * Before releasing the target tuple again, make sure rslot has a
     894             :          * local copy of any pass-by-reference values.
     895             :          */
     896          70 :         ExecMaterializeSlot(rslot);
     897             : 
     898          70 :         ExecClearTuple(slot);
     899          70 :         if (BufferIsValid(delbuffer))
     900          66 :             ReleaseBuffer(delbuffer);
     901             : 
     902          70 :         return rslot;
     903             :     }
     904             : 
     905       65663 :     return NULL;
     906             : }
     907             : 
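[Editor's note] The switch on heap_delete's result above follows a small decision procedure: proceed when the delete went through, skip silently when the same command already dealt with the row, raise a serialization failure under the snapshot isolation levels, and otherwise re-evaluate the quals against the newest row version and retry the delete there. A standalone sketch of that flow, with hypothetical stubs in place of the heap and EvalPlanQual machinery:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the outcomes the heap layer can report. */
typedef enum { DEMO_SELF_UPDATED, DEMO_MAY_BE_UPDATED, DEMO_UPDATED } DemoResult;

static DemoResult try_delete(unsigned tid)                  { (void) tid; return DEMO_MAY_BE_UPDATED; }
static bool       uses_xact_snapshot(void)                  { return false; }
static bool       recheck_finds_new_version(unsigned *tid)  { (void) tid; return false; }

/* Returns true if a row ended up deleted, false if the delete was skipped. */
static bool
demo_delete(unsigned tid)
{
    for (;;)
    {
        switch (try_delete(tid))
        {
            case DEMO_MAY_BE_UPDATED:
                return true;        /* deleted normally */

            case DEMO_SELF_UPDATED:
                return false;       /* already handled within this command */

            case DEMO_UPDATED:
                if (uses_xact_snapshot())
                {
                    fprintf(stderr, "could not serialize access due to concurrent update\n");
                    return false;
                }
                if (recheck_finds_new_version(&tid))
                    continue;       /* chase the newer row version and retry */
                return false;       /* row no longer passes the quals; skip */
        }
    }
}

int
main(void)
{
    printf("deleted: %d\n", demo_delete(42) ? 1 : 0);
    return 0;
}
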
     908             : /* ----------------------------------------------------------------
     909             :  *      ExecUpdate
     910             :  *
     911             :  *      note: we can't run UPDATE queries with transactions
     912             :  *      off because UPDATEs are actually INSERTs and our
     913             :  *      scan will mistakenly loop forever, updating the tuple
     914             :  *      it just inserted..  This should be fixed but until it
     915             :  *      is, we don't want to get stuck in an infinite loop
     916             :  *      which corrupts your database..
     917             :  *
     918             :  *      When updating a table, tupleid identifies the tuple to
     919             :  *      update and oldtuple is NULL.  When updating a view, oldtuple
     920             :  *      is passed to the INSTEAD OF triggers and identifies what to
     921             :  *      update, and tupleid is invalid.  When updating a foreign table,
     922             :  *      tupleid is invalid; the FDW has to figure out which row to
     923             :  *      update using data from the planSlot.  oldtuple is passed to
     924             :  *      foreign table triggers; it is NULL when the foreign table has
     925             :  *      no relevant triggers.
     926             :  *
     927             :  *      Returns RETURNING result if any, otherwise NULL.
     928             :  * ----------------------------------------------------------------
     929             :  */
     930             : static TupleTableSlot *
     931        3569 : ExecUpdate(ModifyTableState *mtstate,
     932             :            ItemPointer tupleid,
     933             :            HeapTuple oldtuple,
     934             :            TupleTableSlot *slot,
     935             :            TupleTableSlot *planSlot,
     936             :            EPQState *epqstate,
     937             :            EState *estate,
     938             :            bool canSetTag)
     939             : {
     940             :     HeapTuple   tuple;
     941             :     ResultRelInfo *resultRelInfo;
     942             :     Relation    resultRelationDesc;
     943             :     HTSU_Result result;
     944             :     HeapUpdateFailureData hufd;
     945        3569 :     List       *recheckIndexes = NIL;
     946             : 
     947             :     /*
     948             :      * abort the operation if not running transactions
     949             :      */
     950        3569 :     if (IsBootstrapProcessingMode())
     951           0 :         elog(ERROR, "cannot UPDATE during bootstrap");
     952             : 
     953             :     /*
     954             :      * get the heap tuple out of the tuple table slot, making sure we have a
     955             :      * writable copy
     956             :      */
     957        3569 :     tuple = ExecMaterializeSlot(slot);
     958             : 
     959             :     /*
     960             :      * get information on the (current) result relation
     961             :      */
     962        3569 :     resultRelInfo = estate->es_result_relation_info;
     963        3569 :     resultRelationDesc = resultRelInfo->ri_RelationDesc;
     964             : 
     965             :     /* BEFORE ROW UPDATE Triggers */
     966        4045 :     if (resultRelInfo->ri_TrigDesc &&
     967         476 :         resultRelInfo->ri_TrigDesc->trig_update_before_row)
     968             :     {
     969         202 :         slot = ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
     970             :                                     tupleid, oldtuple, slot);
     971             : 
     972         200 :         if (slot == NULL)       /* "do nothing" */
     973          22 :             return NULL;
     974             : 
     975             :         /* trigger might have changed tuple */
     976         178 :         tuple = ExecMaterializeSlot(slot);
     977             :     }
     978             : 
     979             :     /* INSTEAD OF ROW UPDATE Triggers */
     980        3997 :     if (resultRelInfo->ri_TrigDesc &&
     981         452 :         resultRelInfo->ri_TrigDesc->trig_update_instead_row)
     982             :     {
     983          18 :         slot = ExecIRUpdateTriggers(estate, resultRelInfo,
     984             :                                     oldtuple, slot);
     985             : 
     986          17 :         if (slot == NULL)       /* "do nothing" */
     987           3 :             return NULL;
     988             : 
     989             :         /* trigger might have changed tuple */
     990          14 :         tuple = ExecMaterializeSlot(slot);
     991             :     }
     992        3527 :     else if (resultRelInfo->ri_FdwRoutine)
     993             :     {
     994             :         /*
     995             :          * update in foreign table: let the FDW do it
     996             :          */
     997           0 :         slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
     998             :                                                                resultRelInfo,
     999             :                                                                slot,
    1000             :                                                                planSlot);
    1001             : 
    1002           0 :         if (slot == NULL)       /* "do nothing" */
    1003           0 :             return NULL;
    1004             : 
    1005             :         /* FDW might have changed tuple */
    1006           0 :         tuple = ExecMaterializeSlot(slot);
    1007             : 
    1008             :         /*
    1009             :          * AFTER ROW Triggers or RETURNING expressions might reference the
    1010             :          * tableoid column, so initialize t_tableOid before evaluating them.
    1011             :          */
    1012           0 :         tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
    1013             :     }
    1014             :     else
    1015             :     {
    1016             :         LockTupleMode lockmode;
    1017             : 
    1018             :         /*
    1019             :          * Constraints might reference the tableoid column, so initialize
    1020             :          * t_tableOid before evaluating them.
    1021             :          */
    1022        3527 :         tuple->t_tableOid = RelationGetRelid(resultRelationDesc);
    1023             : 
    1024             :         /*
    1025             :          * Check any RLS UPDATE WITH CHECK policies
    1026             :          *
    1027             :          * If we generate a new candidate tuple after EvalPlanQual testing, we
    1028             :          * must loop back here and recheck any RLS policies and constraints.
    1029             :          * (We don't need to redo triggers, however.  If there are any BEFORE
    1030             :          * triggers then trigger.c will have done heap_lock_tuple to lock the
    1031             :          * correct tuple, so there's no need to do them again.)
    1032             :          *
    1033             :          * ExecWithCheckOptions() will skip any WCOs which are not of the kind
    1034             :          * we are looking for at this point.
    1035             :          */
    1036             : lreplace:;
    1037        3527 :         if (resultRelInfo->ri_WithCheckOptions != NIL)
    1038          66 :             ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
    1039             :                                  resultRelInfo, slot, estate);
    1040             : 
    1041             :         /*
    1042             :          * Check the constraints of the tuple.  Note that we pass the same
    1043             :          * slot for the orig_slot argument, because unlike ExecInsert(), no
    1044             :          * tuple-routing is performed here, hence the slot remains unchanged.
    1045             :          */
    1046        3523 :         if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck)
    1047         515 :             ExecConstraints(resultRelInfo, slot, estate);
    1048             : 
    1049             :         /*
    1050             :          * replace the heap tuple
    1051             :          *
    1052             :          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
    1053             :          * that the row to be updated is visible to that snapshot, and throw a
    1054             :          * can't-serialize error if not. This is a special-case behavior
    1055             :          * needed for referential integrity updates in transaction-snapshot
    1056             :          * mode transactions.
    1057             :          */
    1058        3519 :         result = heap_update(resultRelationDesc, tupleid, tuple,
    1059             :                              estate->es_output_cid,
    1060             :                              estate->es_crosscheck_snapshot,
    1061             :                              true /* wait for commit */ ,
    1062             :                              &hufd, &lockmode);
    1063        3519 :         switch (result)
    1064             :         {
    1065             :             case HeapTupleSelfUpdated:
    1066             : 
    1067             :                 /*
    1068             :                  * The target tuple was already updated or deleted by the
    1069             :                  * current command, or by a later command in the current
    1070             :                  * transaction.  The former case is possible in a join UPDATE
    1071             :                  * where multiple tuples join to the same target tuple. This
    1072             :                  * is pretty questionable, but Postgres has always allowed it:
    1073             :                  * we just execute the first update action and ignore
    1074             :                  * additional update attempts.
    1075             :                  *
    1076             :                  * The latter case arises if the tuple is modified by a
    1077             :                  * command in a BEFORE trigger, or perhaps by a command in a
    1078             :                  * volatile function used in the query.  In such situations we
    1079             :                  * should not ignore the update, but it is equally unsafe to
    1080             :                  * proceed.  We don't want to discard the original UPDATE
    1081             :                  * while keeping the triggered actions based on it; and we
    1082             :                  * have no principled way to merge this update with the
    1083             :                  * previous ones.  So throwing an error is the only safe
    1084             :                  * course.
    1085             :                  *
    1086             :                  * If a trigger actually intends this type of interaction, it
    1087             :                  * can re-execute the UPDATE (assuming it can figure out how)
    1088             :                  * and then return NULL to cancel the outer update.
    1089             :                  */
    1090          11 :                 if (hufd.cmax != estate->es_output_cid)
    1091           1 :                     ereport(ERROR,
    1092             :                             (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
    1093             :                              errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
    1094             :                              errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
    1095             : 
    1096             :                 /* Else, already updated by self; nothing to do */
    1097          20 :                 return NULL;
    1098             : 
    1099             :             case HeapTupleMayBeUpdated:
    1100        3508 :                 break;
    1101             : 
    1102             :             case HeapTupleUpdated:
    1103           0 :                 if (IsolationUsesXactSnapshot())
    1104           0 :                     ereport(ERROR,
    1105             :                             (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
    1106             :                              errmsg("could not serialize access due to concurrent update")));
    1107           0 :                 if (!ItemPointerEquals(tupleid, &hufd.ctid))
    1108             :                 {
    1109             :                     TupleTableSlot *epqslot;
    1110             : 
    1111           0 :                     epqslot = EvalPlanQual(estate,
    1112             :                                            epqstate,
    1113             :                                            resultRelationDesc,
    1114             :                                            resultRelInfo->ri_RangeTableIndex,
    1115             :                                            lockmode,
    1116             :                                            &hufd.ctid,
    1117             :                                            hufd.xmax);
    1118           0 :                     if (!TupIsNull(epqslot))
    1119             :                     {
    1120           0 :                         *tupleid = hufd.ctid;
    1121           0 :                         slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
    1122           0 :                         tuple = ExecMaterializeSlot(slot);
    1123           0 :                         goto lreplace;
    1124             :                     }
    1125             :                 }
    1126             :                 /* tuple already deleted; nothing to do */
    1127           0 :                 return NULL;
    1128             : 
    1129             :             default:
    1130           0 :                 elog(ERROR, "unrecognized heap_update status: %u", result);
    1131             :                 return NULL;
    1132             :         }
    1133             : 
    1134             :         /*
    1135             :          * Note: instead of having to update the old index tuples associated
    1136             :          * with the heap tuple, all we do is form and insert new index tuples.
    1137             :          * This is because UPDATEs are actually DELETEs and INSERTs, and index
    1138             :          * tuple deletion is done later by VACUUM (see notes in ExecDelete).
    1139             :          * All we do here is insert new index tuples.  -cim 9/27/89
    1140             :          */
    1141             : 
    1142             :         /*
    1143             :          * insert index entries for tuple
    1144             :          *
    1145             :          * Note: heap_update returns the tid (location) of the new tuple in
    1146             :          * the t_self field.
    1147             :          *
    1148             :          * If it's a HOT update, we mustn't insert new index entries.
    1149             :          */
    1150        3508 :         if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple))
    1151        2436 :             recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self),
    1152             :                                                    estate, false, NULL, NIL);
    1153             :     }
    1154             : 
    1155        3520 :     if (canSetTag)
    1156        3437 :         (estate->es_processed)++;
    1157             : 
    1158             :     /* AFTER ROW UPDATE Triggers */
    1159        3520 :     ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, tuple,
    1160             :                          recheckIndexes,
    1161        3520 :                          mtstate->mt_transition_capture);
    1162             : 
    1163        3520 :     list_free(recheckIndexes);
    1164             : 
    1165             :     /*
    1166             :      * Check any WITH CHECK OPTION constraints from parent views.  We are
    1167             :      * required to do this after testing all constraints and uniqueness
    1168             :      * violations per the SQL spec, so we do it after actually updating the
    1169             :      * record in the heap and all indexes.
    1170             :      *
    1171             :      * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
    1172             :      * are looking for at this point.
    1173             :      */
    1174        3520 :     if (resultRelInfo->ri_WithCheckOptions != NIL)
    1175          64 :         ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
    1176             : 
    1177             :     /* Process RETURNING if present */
    1178        3511 :     if (resultRelInfo->ri_projectReturning)
    1179         206 :         return ExecProcessReturning(resultRelInfo, slot, planSlot);
    1180             : 
    1181        3305 :     return NULL;
    1182             : }
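/*
 * Illustrative sketch, not part of the measured source: the "do nothing"
 * convention tested above.  A BEFORE ROW trigger written in C may return
 * NULL to make the executor skip the row entirely.  The function name is
 * hypothetical; a real loadable module would also need fmgr.h and a
 * PG_MODULE_MAGIC declaration.
 */
PG_FUNCTION_INFO_V1(sketch_suppress_row);

Datum
sketch_suppress_row(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata = (TriggerData *) fcinfo->context;

    if (!CALLED_AS_TRIGGER(fcinfo))
        elog(ERROR, "sketch_suppress_row: not called by trigger manager");

    (void) trigdata;            /* row contents not inspected in this sketch */

    /* Returning NULL from a BEFORE ROW trigger suppresses the operation. */
    return PointerGetDatum(NULL);
}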
    1183             : 
    1184             : /*
    1185             :  * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
    1186             :  *
    1187             :  * Try to lock tuple for update as part of speculative insertion.  If
    1188             :  * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
    1189             :  * (but still lock row, even though it may not satisfy estate's
    1190             :  * snapshot).
    1191             :  *
     1192             :  * Returns true if we're done (with or without an update), or false if
    1193             :  * the caller must retry the INSERT from scratch.
    1194             :  */
    1195             : static bool
    1196          75 : ExecOnConflictUpdate(ModifyTableState *mtstate,
    1197             :                      ResultRelInfo *resultRelInfo,
    1198             :                      ItemPointer conflictTid,
    1199             :                      TupleTableSlot *planSlot,
    1200             :                      TupleTableSlot *excludedSlot,
    1201             :                      EState *estate,
    1202             :                      bool canSetTag,
    1203             :                      TupleTableSlot **returning)
    1204             : {
    1205          75 :     ExprContext *econtext = mtstate->ps.ps_ExprContext;
    1206          75 :     Relation    relation = resultRelInfo->ri_RelationDesc;
    1207          75 :     ExprState  *onConflictSetWhere = resultRelInfo->ri_onConflictSetWhere;
    1208             :     HeapTupleData tuple;
    1209             :     HeapUpdateFailureData hufd;
    1210             :     LockTupleMode lockmode;
    1211             :     HTSU_Result test;
    1212             :     Buffer      buffer;
    1213             : 
    1214             :     /* Determine lock mode to use */
    1215          75 :     lockmode = ExecUpdateLockMode(estate, resultRelInfo);
    1216             : 
    1217             :     /*
    1218             :      * Lock tuple for update.  Don't follow updates when tuple cannot be
    1219             :      * locked without doing so.  A row locking conflict here means our
    1220             :      * previous conclusion that the tuple is conclusively committed is not
    1221             :      * true anymore.
    1222             :      */
    1223          75 :     tuple.t_self = *conflictTid;
    1224          75 :     test = heap_lock_tuple(relation, &tuple, estate->es_output_cid,
    1225             :                            lockmode, LockWaitBlock, false, &buffer,
    1226             :                            &hufd);
    1227          75 :     switch (test)
    1228             :     {
    1229             :         case HeapTupleMayBeUpdated:
    1230             :             /* success! */
    1231          71 :             break;
    1232             : 
    1233             :         case HeapTupleInvisible:
    1234             : 
    1235             :             /*
    1236             :              * This can occur when a just inserted tuple is updated again in
    1237             :              * the same command. E.g. because multiple rows with the same
    1238             :              * conflicting key values are inserted.
    1239             :              *
    1240             :              * This is somewhat similar to the ExecUpdate()
    1241             :              * HeapTupleSelfUpdated case.  We do not want to proceed because
    1242             :              * it would lead to the same row being updated a second time in
    1243             :              * some unspecified order, and in contrast to plain UPDATEs
    1244             :              * there's no historical behavior to break.
    1245             :              *
    1246             :              * It is the user's responsibility to prevent this situation from
    1247             :              * occurring.  These problems are why SQL-2003 similarly specifies
    1248             :              * that for SQL MERGE, an exception must be raised in the event of
    1249             :              * an attempt to update the same row twice.
    1250             :              */
    1251           4 :             if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple.t_data)))
    1252           4 :                 ereport(ERROR,
    1253             :                         (errcode(ERRCODE_CARDINALITY_VIOLATION),
    1254             :                          errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"),
    1255             :                          errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
    1256             : 
    1257             :             /* This shouldn't happen */
    1258           0 :             elog(ERROR, "attempted to lock invisible tuple");
    1259             : 
    1260             :         case HeapTupleSelfUpdated:
    1261             : 
    1262             :             /*
    1263             :              * This state should never be reached. As a dirty snapshot is used
    1264             :              * to find conflicting tuples, speculative insertion wouldn't have
    1265             :              * seen this row to conflict with.
    1266             :              */
    1267           0 :             elog(ERROR, "unexpected self-updated tuple");
    1268             : 
    1269             :         case HeapTupleUpdated:
    1270           0 :             if (IsolationUsesXactSnapshot())
    1271           0 :                 ereport(ERROR,
    1272             :                         (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
    1273             :                          errmsg("could not serialize access due to concurrent update")));
    1274             : 
    1275             :             /*
    1276             :              * Tell caller to try again from the very start.
    1277             :              *
    1278             :              * It does not make sense to use the usual EvalPlanQual() style
    1279             :              * loop here, as the new version of the row might not conflict
    1280             :              * anymore, or the conflicting tuple has actually been deleted.
    1281             :              */
    1282           0 :             ReleaseBuffer(buffer);
    1283           0 :             return false;
    1284             : 
    1285             :         default:
    1286           0 :             elog(ERROR, "unrecognized heap_lock_tuple status: %u", test);
    1287             :     }
    1288             : 
    1289             :     /*
    1290             :      * Success, the tuple is locked.
    1291             :      *
    1292             :      * Reset per-tuple memory context to free any expression evaluation
    1293             :      * storage allocated in the previous cycle.
    1294             :      */
    1295          71 :     ResetExprContext(econtext);
    1296             : 
    1297             :     /*
    1298             :      * Verify that the tuple is visible to our MVCC snapshot if the current
    1299             :      * isolation level mandates that.
    1300             :      *
    1301             :      * It's not sufficient to rely on the check within ExecUpdate() as e.g.
    1302             :      * CONFLICT ... WHERE clause may prevent us from reaching that.
    1303             :      *
    1304             :      * This means we only ever continue when a new command in the current
    1305             :      * transaction could see the row, even though in READ COMMITTED mode the
    1306             :      * tuple will not be visible according to the current statement's
    1307             :      * snapshot.  This is in line with the way UPDATE deals with newer tuple
    1308             :      * versions.
    1309             :      */
    1310          71 :     ExecCheckHeapTupleVisible(estate, &tuple, buffer);
    1311             : 
    1312             :     /* Store target's existing tuple in the state's dedicated slot */
    1313          71 :     ExecStoreTuple(&tuple, mtstate->mt_existing, buffer, false);
    1314             : 
    1315             :     /*
    1316             :      * Make tuple and any needed join variables available to ExecQual and
    1317             :      * ExecProject.  The EXCLUDED tuple is installed in ecxt_innertuple, while
    1318             :      * the target's existing tuple is installed in the scantuple.  EXCLUDED
    1319             :      * has been made to reference INNER_VAR in setrefs.c, but there is no
    1320             :      * other redirection.
    1321             :      */
    1322          71 :     econtext->ecxt_scantuple = mtstate->mt_existing;
    1323          71 :     econtext->ecxt_innertuple = excludedSlot;
    1324          71 :     econtext->ecxt_outertuple = NULL;
    1325             : 
    1326          71 :     if (!ExecQual(onConflictSetWhere, econtext))
    1327             :     {
    1328           4 :         ReleaseBuffer(buffer);
    1329           4 :         InstrCountFiltered1(&mtstate->ps, 1);
    1330           4 :         return true;            /* done with the tuple */
    1331             :     }
    1332             : 
    1333          67 :     if (resultRelInfo->ri_WithCheckOptions != NIL)
    1334             :     {
    1335             :         /*
    1336             :          * Check target's existing tuple against UPDATE-applicable USING
    1337             :          * security barrier quals (if any), enforced here as RLS checks/WCOs.
    1338             :          *
    1339             :          * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
    1340             :          * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
    1341             :          * but that's almost the extent of its special handling for ON
    1342             :          * CONFLICT DO UPDATE.
    1343             :          *
    1344             :          * The rewriter will also have associated UPDATE applicable straight
    1345             :          * RLS checks/WCOs for the benefit of the ExecUpdate() call that
    1346             :          * follows.  INSERTs and UPDATEs naturally have mutually exclusive WCO
    1347             :          * kinds, so there is no danger of spurious over-enforcement in the
    1348             :          * INSERT or UPDATE path.
    1349             :          */
    1350           7 :         ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
    1351             :                              mtstate->mt_existing,
    1352             :                              mtstate->ps.state);
    1353             :     }
    1354             : 
    1355             :     /* Project the new tuple version */
    1356          63 :     ExecProject(resultRelInfo->ri_onConflictSetProj);
    1357             : 
    1358             :     /*
    1359             :      * Note that it is possible that the target tuple has been modified in
    1360             :      * this session, after the above heap_lock_tuple. We choose to not error
    1361             :      * out in that case, in line with ExecUpdate's treatment of similar cases.
    1362             :      * This can happen if an UPDATE is triggered from within ExecQual(),
    1363             :      * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
    1364             :      * wCTE in the ON CONFLICT's SET.
    1365             :      */
    1366             : 
    1367             :     /* Execute UPDATE with projection */
    1368          63 :     *returning = ExecUpdate(mtstate, &tuple.t_self, NULL,
    1369             :                             mtstate->mt_conflproj, planSlot,
    1370             :                             &mtstate->mt_epqstate, mtstate->ps.state,
    1371             :                             canSetTag);
    1372             : 
    1373          61 :     ReleaseBuffer(buffer);
    1374          61 :     return true;
    1375             : }
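/*
 * Illustrative sketch, not part of the measured source: how the
 * true/false contract above maps onto the caller's control flow (the
 * actual caller is the ON CONFLICT path of ExecInsert()).  The helper
 * name and the "retry" out-parameter are local to this sketch.
 */
static TupleTableSlot *
sketch_resolve_conflict(ModifyTableState *mtstate,
                        ResultRelInfo *resultRelInfo,
                        ItemPointer conflictTid,
                        TupleTableSlot *planSlot,
                        TupleTableSlot *excludedSlot,
                        EState *estate,
                        bool canSetTag,
                        bool *retry)
{
    TupleTableSlot *returning = NULL;

    /*
     * true: the conflicting row was locked and either updated or filtered
     * out by the DO UPDATE ... WHERE clause, so we are done with it.
     * false: the row changed concurrently and the INSERT must be retried
     * from scratch.
     */
    *retry = !ExecOnConflictUpdate(mtstate, resultRelInfo, conflictTid,
                                   planSlot, excludedSlot, estate,
                                   canSetTag, &returning);

    return returning;
}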
    1376             : 
    1377             : 
    1378             : /*
    1379             :  * Process BEFORE EACH STATEMENT triggers
    1380             :  */
    1381             : static void
    1382        4425 : fireBSTriggers(ModifyTableState *node)
    1383             : {
    1384        4425 :     ResultRelInfo *resultRelInfo = node->resultRelInfo;
    1385             : 
    1386             :     /*
    1387             :      * If the node modifies a partitioned table, we must fire its triggers.
    1388             :      * Note that in that case, node->resultRelInfo points to the first leaf
    1389             :      * partition, not the root table.
    1390             :      */
    1391        4425 :     if (node->rootResultRelInfo != NULL)
    1392          11 :         resultRelInfo = node->rootResultRelInfo;
    1393             : 
    1394        4425 :     switch (node->operation)
    1395             :     {
    1396             :         case CMD_INSERT:
    1397        3533 :             ExecBSInsertTriggers(node->ps.state, resultRelInfo);
    1398        3533 :             if (node->mt_onconflict == ONCONFLICT_UPDATE)
    1399          97 :                 ExecBSUpdateTriggers(node->ps.state,
    1400             :                                      resultRelInfo);
    1401        3533 :             break;
    1402             :         case CMD_UPDATE:
    1403         609 :             ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
    1404         609 :             break;
    1405             :         case CMD_DELETE:
    1406         283 :             ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
    1407         283 :             break;
    1408             :         default:
    1409           0 :             elog(ERROR, "unknown operation");
    1410             :             break;
    1411             :     }
    1412        4425 : }
    1413             : 
    1414             : /*
    1415             :  * Return the ResultRelInfo for which we will fire AFTER STATEMENT triggers.
    1416             :  * This is also the relation into whose tuple format all captured transition
    1417             :  * tuples must be converted.
    1418             :  */
    1419             : static ResultRelInfo *
    1420        8653 : getASTriggerResultRelInfo(ModifyTableState *node)
    1421             : {
    1422             :     /*
    1423             :      * If the node modifies a partitioned table, we must fire its triggers.
    1424             :      * Note that in that case, node->resultRelInfo points to the first leaf
    1425             :      * partition, not the root table.
    1426             :      */
    1427        8653 :     if (node->rootResultRelInfo != NULL)
    1428          21 :         return node->rootResultRelInfo;
    1429             :     else
    1430        8632 :         return node->resultRelInfo;
    1431             : }
    1432             : 
    1433             : /*
    1434             :  * Process AFTER EACH STATEMENT triggers
    1435             :  */
    1436             : static void
    1437        4169 : fireASTriggers(ModifyTableState *node)
    1438             : {
    1439        4169 :     ResultRelInfo *resultRelInfo = getASTriggerResultRelInfo(node);
    1440             : 
    1441        4169 :     switch (node->operation)
    1442             :     {
    1443             :         case CMD_INSERT:
    1444        3324 :             if (node->mt_onconflict == ONCONFLICT_UPDATE)
    1445          83 :                 ExecASUpdateTriggers(node->ps.state,
    1446             :                                      resultRelInfo,
    1447          83 :                                      node->mt_transition_capture);
    1448        3324 :             ExecASInsertTriggers(node->ps.state, resultRelInfo,
    1449        3324 :                                  node->mt_transition_capture);
    1450        3324 :             break;
    1451             :         case CMD_UPDATE:
    1452         575 :             ExecASUpdateTriggers(node->ps.state, resultRelInfo,
    1453         575 :                                  node->mt_transition_capture);
    1454         575 :             break;
    1455             :         case CMD_DELETE:
    1456         270 :             ExecASDeleteTriggers(node->ps.state, resultRelInfo,
    1457         270 :                                  node->mt_transition_capture);
    1458         270 :             break;
    1459             :         default:
    1460           0 :             elog(ERROR, "unknown operation");
    1461             :             break;
    1462             :     }
    1463        4169 : }
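/*
 * Illustrative sketch, not part of the measured source: an AFTER
 * STATEMENT trigger written in C that looks at the transition tuples
 * whose capture is driven by the state set up below.  The function name
 * is hypothetical; it assumes fmgr.h and utils/tuplestore.h in addition
 * to the headers this file already includes.
 */
PG_FUNCTION_INFO_V1(sketch_count_transition_rows);

Datum
sketch_count_transition_rows(PG_FUNCTION_ARGS)
{
    TriggerData *trigdata = (TriggerData *) fcinfo->context;

    if (!CALLED_AS_TRIGGER(fcinfo))
        elog(ERROR, "sketch_count_transition_rows: not called by trigger manager");

    /* tg_newtable/tg_oldtable hold the captured transition tuplestores */
    if (trigdata->tg_newtable != NULL)
        elog(NOTICE, "captured " INT64_FORMAT " new transition rows",
             tuplestore_tuple_count(trigdata->tg_newtable));

    return PointerGetDatum(NULL);
}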
    1464             : 
    1465             : /*
    1466             :  * Set up the state needed for collecting transition tuples for AFTER
    1467             :  * triggers.
    1468             :  */
    1469             : static void
    1470        4484 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
    1471             : {
    1472        4484 :     ResultRelInfo *targetRelInfo = getASTriggerResultRelInfo(mtstate);
    1473             :     int         i;
    1474             : 
    1475             :     /* Check for transition tables on the directly targeted relation. */
    1476        4484 :     mtstate->mt_transition_capture =
    1477        4484 :         MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc);
    1478             : 
    1479             :     /*
    1480             :      * If we found that we need to collect transition tuples then we may also
    1481             :      * need tuple conversion maps for any children that have TupleDescs that
    1482             :      * aren't compatible with the tuplestores.
    1483             :      */
    1484        4484 :     if (mtstate->mt_transition_capture != NULL)
    1485             :     {
    1486             :         ResultRelInfo *resultRelInfos;
    1487             :         int         numResultRelInfos;
    1488             : 
    1489             :         /* Find the set of partitions so that we can find their TupleDescs. */
    1490          48 :         if (mtstate->mt_partition_dispatch_info != NULL)
    1491             :         {
    1492             :             /*
    1493             :              * For INSERT via partitioned table, so we need TupleDescs based
    1494             :              * on the partition routing table.
    1495             :              */
    1496           4 :             resultRelInfos = mtstate->mt_partitions;
    1497           4 :             numResultRelInfos = mtstate->mt_num_partitions;
    1498             :         }
    1499             :         else
    1500             :         {
    1501             :             /* Otherwise we need the ResultRelInfo for each subplan. */
    1502          44 :             resultRelInfos = mtstate->resultRelInfo;
    1503          44 :             numResultRelInfos = mtstate->mt_nplans;
    1504             :         }
    1505             : 
    1506             :         /*
    1507             :          * Build array of conversion maps from each child's TupleDesc to the
    1508             :          * one used in the tuplestore.  The map pointers may be NULL when no
    1509             :          * conversion is necessary, which is hopefully a common case for
    1510             :          * partitions.
    1511             :          */
    1512          48 :         mtstate->mt_transition_tupconv_maps = (TupleConversionMap **)
    1513          48 :             palloc0(sizeof(TupleConversionMap *) * numResultRelInfos);
    1514         119 :         for (i = 0; i < numResultRelInfos; ++i)
    1515             :         {
    1516         142 :             mtstate->mt_transition_tupconv_maps[i] =
    1517          71 :                 convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc),
    1518          71 :                                        RelationGetDescr(targetRelInfo->ri_RelationDesc),
    1519             :                                        gettext_noop("could not convert row type"));
    1520             :         }
    1521             : 
    1522             :         /*
    1523             :          * Install the conversion map for the first plan for UPDATE and DELETE
    1524             :          * operations.  It will be advanced each time we switch to the next
    1525             :          * plan.  (INSERT operations set it every time.)
    1526             :          */
    1527          96 :         mtstate->mt_transition_capture->tcs_map =
    1528          48 :             mtstate->mt_transition_tupconv_maps[0];
    1529             :     }
    1530        4484 : }
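/*
 * Illustrative sketch, not part of the measured source: applying one of
 * the conversion maps built above.  When a map is non-NULL, the captured
 * child tuple is rewritten into the target relation's rowtype before it
 * goes into the transition tuplestore (the capture itself is handled in
 * trigger.c).  The helper name is hypothetical; do_convert_tuple() comes
 * from access/tupconvert.h.
 */
static HeapTuple
sketch_convert_for_tuplestore(HeapTuple childtup, TupleConversionMap *map)
{
    if (map == NULL)
        return childtup;        /* layouts already match; no conversion */

    /* do_convert_tuple() builds a new tuple in the current memory context */
    return do_convert_tuple(childtup, map);
}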
    1531             : 
    1532             : /* ----------------------------------------------------------------
    1533             :  *     ExecModifyTable
    1534             :  *
    1535             :  *      Perform table modifications as required, and return RETURNING results
    1536             :  *      if needed.
    1537             :  * ----------------------------------------------------------------
    1538             :  */
    1539             : static TupleTableSlot *
    1540        5061 : ExecModifyTable(PlanState *pstate)
    1541             : {
    1542        5061 :     ModifyTableState *node = castNode(ModifyTableState, pstate);
    1543        5061 :     EState     *estate = node->ps.state;
    1544        5061 :     CmdType     operation = node->operation;
    1545             :     ResultRelInfo *saved_resultRelInfo;
    1546             :     ResultRelInfo *resultRelInfo;
    1547             :     PlanState  *subplanstate;
    1548             :     JunkFilter *junkfilter;
    1549             :     TupleTableSlot *slot;
    1550             :     TupleTableSlot *planSlot;
    1551        5061 :     ItemPointer tupleid = NULL;
    1552             :     ItemPointerData tuple_ctid;
    1553             :     HeapTupleData oldtupdata;
    1554             :     HeapTuple   oldtuple;
    1555             : 
    1556        5061 :     CHECK_FOR_INTERRUPTS();
    1557             : 
    1558             :     /*
    1559             :      * This should NOT get called during EvalPlanQual; we should have passed a
    1560             :      * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
    1561             :      * Assert because this condition is easy to miss in testing.  (Note:
    1562             :      * although ModifyTable should not get executed within an EvalPlanQual
    1563             :      * operation, we do have to allow it to be initialized and shut down in
    1564             :      * case it is within a CTE subplan.  Hence this test must be here, not in
    1565             :      * ExecInitModifyTable.)
    1566             :      */
    1567        5061 :     if (estate->es_epqTuple != NULL)
    1568           0 :         elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
    1569             : 
    1570             :     /*
    1571             :      * If we've already completed processing, don't try to do more.  We need
    1572             :      * this test because ExecPostprocessPlan might call us an extra time, and
    1573             :      * our subplan's nodes aren't necessarily robust against being called
    1574             :      * extra times.
    1575             :      */
    1576        5061 :     if (node->mt_done)
    1577         111 :         return NULL;
    1578             : 
    1579             :     /*
    1580             :      * On first call, fire BEFORE STATEMENT triggers before proceeding.
    1581             :      */
    1582        4950 :     if (node->fireBSTriggers)
    1583             :     {
    1584        4425 :         fireBSTriggers(node);
    1585        4425 :         node->fireBSTriggers = false;
    1586             :     }
    1587             : 
    1588             :     /* Preload local variables */
    1589        4950 :     resultRelInfo = node->resultRelInfo + node->mt_whichplan;
    1590        4950 :     subplanstate = node->mt_plans[node->mt_whichplan];
    1591        4950 :     junkfilter = resultRelInfo->ri_junkFilter;
    1592             : 
    1593             :     /*
    1594             :      * es_result_relation_info must point to the currently active result
    1595             :      * relation while we are within this ModifyTable node.  Even though
    1596             :      * ModifyTable nodes can't be nested statically, they can be nested
    1597             :      * dynamically (since our subplan could include a reference to a modifying
    1598             :      * CTE).  So we have to save and restore the caller's value.
    1599             :      */
    1600        4950 :     saved_resultRelInfo = estate->es_result_relation_info;
    1601             : 
    1602        4950 :     estate->es_result_relation_info = resultRelInfo;
    1603             : 
    1604             :     /*
    1605             :      * Fetch rows from subplan(s), and execute the required table modification
    1606             :      * for each row.
    1607             :      */
    1608             :     for (;;)
    1609             :     {
    1610             :         /*
    1611             :          * Reset the per-output-tuple exprcontext.  This is needed because
    1612             :          * triggers expect to use that context as workspace.  It's a bit ugly
    1613             :          * to do this below the top level of the plan, however.  We might need
    1614             :          * to rethink this later.
    1615             :          */
    1616      545859 :         ResetPerTupleExprContext(estate);
    1617             : 
    1618      545859 :         planSlot = ExecProcNode(subplanstate);
    1619             : 
    1620      545821 :         if (TupIsNull(planSlot))
    1621             :         {
    1622             :             /* advance to next subplan if any */
    1623        4260 :             node->mt_whichplan++;
    1624        4260 :             if (node->mt_whichplan < node->mt_nplans)
    1625             :             {
    1626          91 :                 resultRelInfo++;
    1627          91 :                 subplanstate = node->mt_plans[node->mt_whichplan];
    1628          91 :                 junkfilter = resultRelInfo->ri_junkFilter;
    1629          91 :                 estate->es_result_relation_info = resultRelInfo;
    1630          91 :                 EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan,
    1631          91 :                                     node->mt_arowmarks[node->mt_whichplan]);
    1632          91 :                 if (node->mt_transition_capture != NULL)
    1633             :                 {
    1634             :                     /* Prepare to convert transition tuples from this child. */
    1635          15 :                     Assert(node->mt_transition_tupconv_maps != NULL);
    1636          30 :                     node->mt_transition_capture->tcs_map =
    1637          15 :                         node->mt_transition_tupconv_maps[node->mt_whichplan];
    1638             :                 }
    1639          91 :                 continue;
    1640             :             }
    1641             :             else
    1642        4169 :                 break;
    1643             :         }
    1644             : 
    1645             :         /*
    1646             :          * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
    1647             :          * here is compute the RETURNING expressions.
    1648             :          */
    1649      541561 :         if (resultRelInfo->ri_usesFdwDirectModify)
    1650             :         {
    1651           0 :             Assert(resultRelInfo->ri_projectReturning);
    1652             : 
    1653             :             /*
    1654             :              * A scan slot containing the data that was actually inserted,
    1655             :              * updated or deleted has already been made available to
    1656             :              * ExecProcessReturning by IterateDirectModify, so no need to
    1657             :              * provide it here.
    1658             :              */
    1659           0 :             slot = ExecProcessReturning(resultRelInfo, NULL, planSlot);
    1660             : 
    1661           0 :             estate->es_result_relation_info = saved_resultRelInfo;
    1662           0 :             return slot;
    1663             :         }
    1664             : 
    1665      541561 :         EvalPlanQualSetSlot(&node->mt_epqstate, planSlot);
    1666      541561 :         slot = planSlot;
    1667             : 
    1668      541561 :         oldtuple = NULL;
    1669      541561 :         if (junkfilter != NULL)
    1670             :         {
    1671             :             /*
    1672             :              * extract the 'ctid' or 'wholerow' junk attribute.
    1673             :              */
    1674       69247 :             if (operation == CMD_UPDATE || operation == CMD_DELETE)
    1675             :             {
    1676             :                 char        relkind;
    1677             :                 Datum       datum;
    1678             :                 bool        isNull;
    1679             : 
    1680       69247 :                 relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
    1681       69247 :                 if (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW)
    1682             :                 {
    1683       69221 :                     datum = ExecGetJunkAttribute(slot,
    1684       69221 :                                                  junkfilter->jf_junkAttNo,
    1685             :                                                  &isNull);
    1686             :                     /* shouldn't ever get a null result... */
    1687       69221 :                     if (isNull)
    1688           0 :                         elog(ERROR, "ctid is NULL");
    1689             : 
    1690       69221 :                     tupleid = (ItemPointer) DatumGetPointer(datum);
    1691       69221 :                     tuple_ctid = *tupleid;  /* be sure we don't free ctid!! */
    1692       69221 :                     tupleid = &tuple_ctid;
    1693             :                 }
    1694             : 
    1695             :                 /*
    1696             :                  * Use the wholerow attribute, when available, to reconstruct
    1697             :                  * the old relation tuple.
    1698             :                  *
    1699             :                  * Foreign table updates have a wholerow attribute when the
    1700             :                  * relation has a row-level trigger.  Note that the wholerow
    1701             :                  * attribute does not carry system columns.  Foreign table
    1702             :                  * triggers miss seeing those, except that we know enough here
    1703             :                  * to set t_tableOid.  Quite separately from this, the FDW may
    1704             :                  * fetch its own junk attrs to identify the row.
    1705             :                  *
    1706             :                  * Other relevant relkinds, currently limited to views, always
    1707             :                  * have a wholerow attribute.
    1708             :                  */
    1709          26 :                 else if (AttributeNumberIsValid(junkfilter->jf_junkAttNo))
    1710             :                 {
    1711          26 :                     datum = ExecGetJunkAttribute(slot,
    1712          26 :                                                  junkfilter->jf_junkAttNo,
    1713             :                                                  &isNull);
    1714             :                     /* shouldn't ever get a null result... */
    1715          26 :                     if (isNull)
    1716           0 :                         elog(ERROR, "wholerow is NULL");
    1717             : 
    1718          26 :                     oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
    1719          26 :                     oldtupdata.t_len =
    1720          26 :                         HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
    1721          26 :                     ItemPointerSetInvalid(&(oldtupdata.t_self));
    1722             :                     /* Historically, view triggers see invalid t_tableOid. */
    1723          26 :                     oldtupdata.t_tableOid =
    1724          26 :                         (relkind == RELKIND_VIEW) ? InvalidOid :
    1725           0 :                         RelationGetRelid(resultRelInfo->ri_RelationDesc);
    1726             : 
    1727          26 :                     oldtuple = &oldtupdata;
    1728             :                 }
    1729             :                 else
    1730           0 :                     Assert(relkind == RELKIND_FOREIGN_TABLE);
    1731             :             }
    1732             : 
    1733             :             /*
    1734             :              * apply the junkfilter if needed.
    1735             :              */
    1736       69247 :             if (operation != CMD_DELETE)
    1737        3506 :                 slot = ExecFilterJunk(junkfilter, slot);
    1738             :         }
    1739             : 
    1740      541561 :         switch (operation)
    1741             :         {
    1742             :             case CMD_INSERT:
    1743      472314 :                 slot = ExecInsert(node, slot, planSlot,
    1744             :                                   node->mt_arbiterindexes, node->mt_onconflict,
    1745      472314 :                                   estate, node->canSetTag);
    1746      472124 :                 break;
    1747             :             case CMD_UPDATE:
    1748        3506 :                 slot = ExecUpdate(node, tupleid, oldtuple, slot, planSlot,
    1749        3506 :                                   &node->mt_epqstate, estate, node->canSetTag);
    1750        3485 :                 break;
    1751             :             case CMD_DELETE:
    1752       65741 :                 slot = ExecDelete(node, tupleid, oldtuple, planSlot,
    1753       65741 :                                   &node->mt_epqstate, estate, node->canSetTag);
    1754       65735 :                 break;
    1755             :             default:
    1756           0 :                 elog(ERROR, "unknown operation");
    1757             :                 break;
    1758             :         }
    1759             : 
    1760             :         /*
    1761             :          * If we got a RETURNING result, return it to caller.  We'll continue
    1762             :          * the work on next call.
    1763             :          */
    1764      541344 :         if (slot)
    1765             :         {
    1766         526 :             estate->es_result_relation_info = saved_resultRelInfo;
    1767         526 :             return slot;
    1768             :         }
    1769      540909 :     }
    1770             : 
    1771             :     /* Restore es_result_relation_info before exiting */
    1772        4169 :     estate->es_result_relation_info = saved_resultRelInfo;
    1773             : 
    1774             :     /*
    1775             :      * We're done, but fire AFTER STATEMENT triggers before exiting.
    1776             :      */
    1777        4169 :     fireASTriggers(node);
    1778             : 
    1779        4169 :     node->mt_done = true;
    1780             : 
    1781        4169 :     return NULL;
    1782             : }
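/*
 * Illustrative sketch, not part of the measured source: the return
 * convention visible in ExecModifyTable() above, where a slot is handed
 * back for each RETURNING row and NULL once all the work is done.  The
 * loop below is a simplified stand-in for the executor's top-level
 * driver, and the helper name is hypothetical.
 */
static void
sketch_drain_modifytable(PlanState *mtnode)
{
    for (;;)
    {
        TupleTableSlot *result = ExecProcNode(mtnode);

        if (TupIsNull(result))
            break;              /* all requested modifications are done */

        /* a real caller would hand the RETURNING tuple to a DestReceiver */
    }
}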
    1783             : 
    1784             : /* ----------------------------------------------------------------
    1785             :  *      ExecInitModifyTable
    1786             :  * ----------------------------------------------------------------
    1787             :  */
    1788             : ModifyTableState *
    1789        4484 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
    1790             : {
    1791             :     ModifyTableState *mtstate;
    1792        4484 :     CmdType     operation = node->operation;
    1793        4484 :     int         nplans = list_length(node->plans);
    1794             :     ResultRelInfo *saved_resultRelInfo;
    1795             :     ResultRelInfo *resultRelInfo;
    1796             :     TupleDesc   tupDesc;
    1797             :     Plan       *subplan;
    1798             :     ListCell   *l;
    1799             :     int         i;
    1800             :     Relation    rel;
    1801             : 
    1802             :     /* check for unsupported flags */
    1803        4484 :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
    1804             : 
    1805             :     /*
    1806             :      * create state structure
    1807             :      */
    1808        4484 :     mtstate = makeNode(ModifyTableState);
    1809        4484 :     mtstate->ps.plan = (Plan *) node;
    1810        4484 :     mtstate->ps.state = estate;
    1811        4484 :     mtstate->ps.ExecProcNode = ExecModifyTable;
    1812             : 
    1813        4484 :     mtstate->operation = operation;
    1814        4484 :     mtstate->canSetTag = node->canSetTag;
    1815        4484 :     mtstate->mt_done = false;
    1816             : 
    1817        4484 :     mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
    1818        4484 :     mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
    1819             : 
    1820             :     /* If modifying a partitioned table, initialize the root table info */
    1821        4484 :     if (node->rootResultRelIndex >= 0)
    1822          22 :         mtstate->rootResultRelInfo = estate->es_root_result_relations +
    1823          11 :             node->rootResultRelIndex;
    1824             : 
    1825        4484 :     mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
    1826        4484 :     mtstate->mt_nplans = nplans;
    1827        4484 :     mtstate->mt_onconflict = node->onConflictAction;
    1828        4484 :     mtstate->mt_arbiterindexes = node->arbiterIndexes;
    1829             : 
    1830             :     /* set up epqstate with dummy subplan data for the moment */
    1831        4484 :     EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
    1832        4484 :     mtstate->fireBSTriggers = true;
    1833             : 
    1834             :     /*
    1835             :      * call ExecInitNode on each of the plans to be executed and save the
    1836             :      * results into the array "mt_plans".  This is also a convenient place to
    1837             :      * verify that the proposed target relations are valid and open their
    1838             :      * indexes for insertion of new index entries.  Note we *must* set
    1839             :      * estate->es_result_relation_info correctly while we initialize each
    1840             :      * sub-plan; ExecContextForcesOids depends on that!
    1841             :      */
    1842        4484 :     saved_resultRelInfo = estate->es_result_relation_info;
    1843             : 
    1844        4484 :     resultRelInfo = mtstate->resultRelInfo;
    1845        4484 :     i = 0;
    1846        9084 :     foreach(l, node->plans)
    1847             :     {
    1848        4600 :         subplan = (Plan *) lfirst(l);
    1849             : 
    1850             :         /* Initialize the usesFdwDirectModify flag */
    1851        4600 :         resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
    1852        4600 :                                                               node->fdwDirectModifyPlans);
    1853             : 
    1854             :         /*
    1855             :          * Verify result relation is a valid target for the current operation
    1856             :          */
    1857        4600 :         CheckValidResultRel(resultRelInfo->ri_RelationDesc, operation);
    1858             : 
    1859             :         /*
    1860             :          * If there are indices on the result relation, open them and save
    1861             :          * descriptors in the result relation info, so that we can add new
    1862             :          * index entries for the tuples we add/update.  We need not do this
    1863             :          * for a DELETE, however, since deletion doesn't affect indexes. Also,
    1864             :          * inside an EvalPlanQual operation, the indexes might be open
    1865             :          * already, since we share the resultrel state with the original
    1866             :          * query.
    1867             :          */
    1868        4600 :         if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
    1869        1254 :             operation != CMD_DELETE &&
    1870        1254 :             resultRelInfo->ri_IndexRelationDescs == NULL)
    1871        1254 :             ExecOpenIndices(resultRelInfo, mtstate->mt_onconflict != ONCONFLICT_NONE);
    1872             : 
    1873             :         /* Now init the plan for this result rel */
    1874        4600 :         estate->es_result_relation_info = resultRelInfo;
    1875        4600 :         mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);
    1876             : 
    1877             :         /* Also let FDWs init themselves for foreign-table result rels */
    1878        9200 :         if (!resultRelInfo->ri_usesFdwDirectModify &&
    1879        4600 :             resultRelInfo->ri_FdwRoutine != NULL &&
    1880           0 :             resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
    1881             :         {
    1882           0 :             List       *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
    1883             : 
    1884           0 :             resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
    1885             :                                                              resultRelInfo,
    1886             :                                                              fdw_private,
    1887             :                                                              i,
    1888             :                                                              eflags);
    1889             :         }
    1890             : 
    1891        4600 :         resultRelInfo++;
    1892        4600 :         i++;
    1893             :     }
    1894             : 
    1895        4484 :     estate->es_result_relation_info = saved_resultRelInfo;
    1896             : 
    1897             :     /* The root table RT index is at the head of the partitioned_rels list */
    1898        4484 :     if (node->partitioned_rels)
    1899             :     {
    1900             :         Index       root_rti;
    1901             :         Oid         root_oid;
    1902             : 
    1903          11 :         root_rti = linitial_int(node->partitioned_rels);
    1904          11 :         root_oid = getrelid(root_rti, estate->es_range_table);
    1905          11 :         rel = heap_open(root_oid, NoLock);  /* locked by InitPlan */
    1906             :     }
    1907             :     else
    1908        4473 :         rel = mtstate->resultRelInfo->ri_RelationDesc;
    1909             : 
    1910             :     /* Build state for INSERT tuple routing */
    1911        8041 :     if (operation == CMD_INSERT &&
    1912        3557 :         rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
    1913             :     {
    1914             :         PartitionDispatch *partition_dispatch_info;
    1915             :         ResultRelInfo *partitions;
    1916             :         TupleConversionMap **partition_tupconv_maps;
    1917             :         TupleTableSlot *partition_tuple_slot;
    1918             :         int         num_parted,
    1919             :                     num_partitions;
    1920             : 
    1921          65 :         ExecSetupPartitionTupleRouting(rel,
    1922             :                                        node->nominalRelation,
    1923             :                                        estate,
    1924             :                                        &partition_dispatch_info,
    1925             :                                        &partitions,
    1926             :                                        &partition_tupconv_maps,
    1927             :                                        &partition_tuple_slot,
    1928             :                                        &num_parted, &num_partitions);
    1929          65 :         mtstate->mt_partition_dispatch_info = partition_dispatch_info;
    1930          65 :         mtstate->mt_num_dispatch = num_parted;
    1931          65 :         mtstate->mt_partitions = partitions;
    1932          65 :         mtstate->mt_num_partitions = num_partitions;
    1933          65 :         mtstate->mt_partition_tupconv_maps = partition_tupconv_maps;
    1934          65 :         mtstate->mt_partition_tuple_slot = partition_tuple_slot;
    1935             :     }
    1936             : 
    1937             :     /* Build state for collecting transition tuples */
    1938        4484 :     ExecSetupTransitionCaptureState(mtstate, estate);
    1939             : 
    1940             :     /*
    1941             :      * Initialize any WITH CHECK OPTION constraints if needed.
    1942             :      */
    1943        4484 :     resultRelInfo = mtstate->resultRelInfo;
    1944        4484 :     i = 0;
    1945        4614 :     foreach(l, node->withCheckOptionLists)
    1946             :     {
    1947         130 :         List       *wcoList = (List *) lfirst(l);
    1948         130 :         List       *wcoExprs = NIL;
    1949             :         ListCell   *ll;
    1950             : 
    1951         323 :         foreach(ll, wcoList)
    1952             :         {
    1953         193 :             WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
    1954         193 :             ExprState  *wcoExpr = ExecInitQual((List *) wco->qual,
    1955         193 :                                                mtstate->mt_plans[i]);
    1956             : 
    1957         193 :             wcoExprs = lappend(wcoExprs, wcoExpr);
    1958             :         }
    1959             : 
    1960         130 :         resultRelInfo->ri_WithCheckOptions = wcoList;
    1961         130 :         resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
    1962         130 :         resultRelInfo++;
    1963         130 :         i++;
    1964             :     }
    1965             : 
    1966             :     /*
    1967             :      * Build WITH CHECK OPTION constraints for each leaf partition rel. Note
    1968             :      * that we didn't build the withCheckOptionList for each partition within
    1969             :      * the planner, but simple translation of the varattnos for each partition
    1970             :      * will suffice.  This only occurs for the INSERT case; UPDATE/DELETE
    1971             :      * cases are handled above.
    1972             :      */
    1973        4484 :     if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0)
    1974             :     {
    1975             :         List       *wcoList;
    1976             :         PlanState  *plan;
    1977             : 
    1978             :         /*
    1979             :          * In case of INSERT on partitioned tables, there is only one plan.
    1980             :          * Likewise, there is only one WITH CHECK OPTION list, not one per
    1981             :          * partition.  We make a copy of the WCO qual for each partition; note
    1982             :          * that, if there are SubPlans in there, they all end up attached to
    1983             :          * the one parent Plan node.
    1984             :          */
    1985           7 :         Assert(operation == CMD_INSERT &&
    1986             :                list_length(node->withCheckOptionLists) == 1 &&
    1987             :                mtstate->mt_nplans == 1);
    1988           7 :         wcoList = linitial(node->withCheckOptionLists);
    1989           7 :         plan = mtstate->mt_plans[0];
    1990           7 :         resultRelInfo = mtstate->mt_partitions;
    1991          23 :         for (i = 0; i < mtstate->mt_num_partitions; i++)
    1992             :         {
    1993          16 :             Relation    partrel = resultRelInfo->ri_RelationDesc;
    1994             :             List       *mapped_wcoList;
    1995          16 :             List       *wcoExprs = NIL;
    1996             :             ListCell   *ll;
    1997             : 
    1998             :             /* varno = node->nominalRelation */
    1999          16 :             mapped_wcoList = map_partition_varattnos(wcoList,
    2000          16 :                                                      node->nominalRelation,
    2001             :                                                      partrel, rel, NULL);
    2002          44 :             foreach(ll, mapped_wcoList)
    2003             :             {
    2004          28 :                 WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll));
    2005          28 :                 ExprState  *wcoExpr = ExecInitQual(castNode(List, wco->qual),
    2006             :                                                    plan);
    2007             : 
    2008          28 :                 wcoExprs = lappend(wcoExprs, wcoExpr);
    2009             :             }
    2010             : 
    2011          16 :             resultRelInfo->ri_WithCheckOptions = mapped_wcoList;
    2012          16 :             resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
    2013          16 :             resultRelInfo++;
    2014             :         }
    2015             :     }
    2016             : 
    2017             :     /*
    2018             :      * Initialize RETURNING projections if needed.
    2019             :      */
    2020        4484 :     if (node->returningLists)
    2021             :     {
    2022             :         TupleTableSlot *slot;
    2023             :         ExprContext *econtext;
    2024             :         List       *returningList;
    2025             : 
    2026             :         /*
    2027             :          * Initialize result tuple slot and assign its rowtype using the first
    2028             :          * RETURNING list.  We assume the rest will look the same.
    2029             :          */
    2030         255 :         tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists),
    2031             :                                  false);
    2032             : 
    2033             :         /* Set up a slot for the output of the RETURNING projection(s) */
    2034         255 :         ExecInitResultTupleSlot(estate, &mtstate->ps);
    2035         255 :         ExecAssignResultType(&mtstate->ps, tupDesc);
    2036         255 :         slot = mtstate->ps.ps_ResultTupleSlot;
    2037             : 
    2038             :         /* Need an econtext too */
    2039         255 :         if (mtstate->ps.ps_ExprContext == NULL)
    2040         255 :             ExecAssignExprContext(estate, &mtstate->ps);
    2041         255 :         econtext = mtstate->ps.ps_ExprContext;
    2042             : 
    2043             :         /*
    2044             :          * Build a projection for each result rel.
    2045             :          */
    2046         255 :         resultRelInfo = mtstate->resultRelInfo;
    2047         523 :         foreach(l, node->returningLists)
    2048             :         {
    2049         268 :             List       *rlist = (List *) lfirst(l);
    2050             : 
    2051         268 :             resultRelInfo->ri_projectReturning =
    2052         268 :                 ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
    2053         268 :                                         resultRelInfo->ri_RelationDesc->rd_att);
    2054         268 :             resultRelInfo++;
    2055             :         }
    2056             : 
    2057             :         /*
    2058             :          * Build a projection for each leaf partition rel.  Note that we
    2059             :          * didn't build the returningList for each partition within the
    2060             :          * planner, but simple translation of the varattnos for each partition
    2061             :          * will suffice.  This only occurs for the INSERT case; UPDATE/DELETE
    2062             :          * are handled above.
    2063             :          */
    2064         255 :         resultRelInfo = mtstate->mt_partitions;
    2065         255 :         returningList = linitial(node->returningLists);
    2066         268 :         for (i = 0; i < mtstate->mt_num_partitions; i++)
    2067             :         {
    2068          13 :             Relation    partrel = resultRelInfo->ri_RelationDesc;
    2069             :             List       *rlist;
    2070             : 
    2071             :             /* varno = node->nominalRelation */
    2072          13 :             rlist = map_partition_varattnos(returningList,
    2073          13 :                                             node->nominalRelation,
    2074             :                                             partrel, rel, NULL);
    2075          13 :             resultRelInfo->ri_projectReturning =
    2076          13 :                 ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
    2077          13 :                                         resultRelInfo->ri_RelationDesc->rd_att);
    2078          13 :             resultRelInfo++;
    2079             :         }
    2080             :     }
    2081             :     else
    2082             :     {
    2083             :         /*
    2084             :          * We still must construct a dummy result tuple type, because InitPlan
    2085             :          * expects one (maybe should change that?).
    2086             :          */
    2087        4229 :         tupDesc = ExecTypeFromTL(NIL, false);
    2088        4229 :         ExecInitResultTupleSlot(estate, &mtstate->ps);
    2089        4229 :         ExecAssignResultType(&mtstate->ps, tupDesc);
    2090             : 
    2091        4229 :         mtstate->ps.ps_ExprContext = NULL;
    2092             :     }
    2093             : 
    2094             :     /* Close the root partitioned rel if we opened it above. */
    2095        4484 :     if (rel != mtstate->resultRelInfo->ri_RelationDesc)
    2096          11 :         heap_close(rel, NoLock);
    2097             : 
    2098             :     /*
    2099             :      * If needed, initialize target list, projection and qual for ON CONFLICT
    2100             :      * DO UPDATE.
    2101             :      */
    2102        4484 :     resultRelInfo = mtstate->resultRelInfo;
    2103        4484 :     if (node->onConflictAction == ONCONFLICT_UPDATE)
    2104             :     {
    2105             :         ExprContext *econtext;
    2106             :         TupleDesc   tupDesc;
    2107             : 
    2108             :         /* INSERT may only have one plan; inheritance is not expanded */
    2109         107 :         Assert(nplans == 1);
    2110             : 
    2111             :         /* already exists if created by RETURNING processing above */
    2112         107 :         if (mtstate->ps.ps_ExprContext == NULL)
    2113          62 :             ExecAssignExprContext(estate, &mtstate->ps);
    2114             : 
    2115         107 :         econtext = mtstate->ps.ps_ExprContext;
    2116             : 
    2117             :         /* initialize slot for the existing tuple */
    2118         107 :         mtstate->mt_existing = ExecInitExtraTupleSlot(mtstate->ps.state);
    2119         107 :         ExecSetSlotDescriptor(mtstate->mt_existing,
    2120         107 :                               resultRelInfo->ri_RelationDesc->rd_att);
    2121             : 
    2122             :         /* carried forward solely for the benefit of EXPLAIN */
    2123         107 :         mtstate->mt_excludedtlist = node->exclRelTlist;
    2124             : 
    2125             :         /* create target slot for UPDATE SET projection */
    2126         107 :         tupDesc = ExecTypeFromTL((List *) node->onConflictSet,
    2127         107 :                                  resultRelInfo->ri_RelationDesc->rd_rel->relhasoids);
    2128         107 :         mtstate->mt_conflproj = ExecInitExtraTupleSlot(mtstate->ps.state);
    2129         107 :         ExecSetSlotDescriptor(mtstate->mt_conflproj, tupDesc);
    2130             : 
    2131             :         /* build UPDATE SET projection state */
    2132         107 :         resultRelInfo->ri_onConflictSetProj =
    2133         107 :             ExecBuildProjectionInfo(node->onConflictSet, econtext,
    2134             :                                     mtstate->mt_conflproj, &mtstate->ps,
    2135         107 :                                     resultRelInfo->ri_RelationDesc->rd_att);
    2136             : 
    2137             :         /* build DO UPDATE WHERE clause expression */
    2138         107 :         if (node->onConflictWhere)
    2139             :         {
    2140             :             ExprState  *qualexpr;
    2141             : 
    2142          18 :             qualexpr = ExecInitQual((List *) node->onConflictWhere,
    2143             :                                     &mtstate->ps);
    2144             : 
    2145          18 :             resultRelInfo->ri_onConflictSetWhere = qualexpr;
    2146             :         }
    2147             :     }
    2148             : 
    2149             :     /*
    2150             :      * If we have any secondary relations in an UPDATE or DELETE, they need to
    2151             :      * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
    2152             :      * EvalPlanQual mechanism needs to be told about them.  Locate the
    2153             :      * relevant ExecRowMarks.
    2154             :      */
    2155        4668 :     foreach(l, node->rowMarks)
    2156             :     {
    2157         184 :         PlanRowMark *rc = lfirst_node(PlanRowMark, l);
    2158             :         ExecRowMark *erm;
    2159             : 
    2160             :         /* ignore "parent" rowmarks; they are irrelevant at runtime */
    2161         184 :         if (rc->isParent)
    2162           7 :             continue;
    2163             : 
    2164             :         /* find ExecRowMark (same for all subplans) */
    2165         177 :         erm = ExecFindRowMark(estate, rc->rti, false);
    2166             : 
    2167             :         /* build ExecAuxRowMark for each subplan */
    2168         399 :         for (i = 0; i < nplans; i++)
    2169             :         {
    2170             :             ExecAuxRowMark *aerm;
    2171             : 
    2172         222 :             subplan = mtstate->mt_plans[i]->plan;
    2173         222 :             aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
    2174         222 :             mtstate->mt_arowmarks[i] = lappend(mtstate->mt_arowmarks[i], aerm);
    2175             :         }
    2176             :     }
    2177             : 
    2178             :     /* select first subplan */
    2179        4484 :     mtstate->mt_whichplan = 0;
    2180        4484 :     subplan = (Plan *) linitial(node->plans);
    2181        4484 :     EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan,
    2182        4484 :                         mtstate->mt_arowmarks[0]);
    2183             : 
    2184             :     /*
    2185             :      * Initialize the junk filter(s) if needed.  INSERT queries need a filter
    2186             :      * if there are any junk attrs in the tlist.  UPDATE and DELETE always
    2187             :      * need a filter, since there's always at least one junk attribute present
    2188             :      * --- no need to look first.  Typically, this will be a 'ctid' or
    2189             :      * 'wholerow' attribute, but in the case of a foreign data wrapper it
    2190             :      * might be a set of junk attributes sufficient to identify the remote
    2191             :      * row.
    2192             :      *
    2193             :      * If there are multiple result relations, each one needs its own junk
    2194             :      * filter.  Note multiple rels are only possible for UPDATE/DELETE, so we
    2195             :      * can't be fooled by some needing a filter and some not.
    2196             :      *
    2197             :      * This section of code is also a convenient place to verify that the
    2198             :      * output of an INSERT or UPDATE matches the target table(s).
    2199             :      */
    2200             :     {
    2201        4484 :         bool        junk_filter_needed = false;
    2202             : 
    2203        4484 :         switch (operation)
    2204             :         {
    2205             :             case CMD_INSERT:
    2206       12254 :                 foreach(l, subplan->targetlist)
    2207             :                 {
    2208        8697 :                     TargetEntry *tle = (TargetEntry *) lfirst(l);
    2209             : 
    2210        8697 :                     if (tle->resjunk)
    2211             :                     {
    2212           0 :                         junk_filter_needed = true;
    2213           0 :                         break;
    2214             :                     }
    2215             :                 }
    2216        3557 :                 break;
    2217             :             case CMD_UPDATE:
    2218             :             case CMD_DELETE:
    2219         927 :                 junk_filter_needed = true;
    2220         927 :                 break;
    2221             :             default:
    2222           0 :                 elog(ERROR, "unknown operation");
    2223             :                 break;
    2224             :         }
    2225             : 
    2226        4484 :         if (junk_filter_needed)
    2227             :         {
    2228         927 :             resultRelInfo = mtstate->resultRelInfo;
    2229        1970 :             for (i = 0; i < nplans; i++)
    2230             :             {
    2231             :                 JunkFilter *j;
    2232             : 
    2233        1043 :                 subplan = mtstate->mt_plans[i]->plan;
    2234        1043 :                 if (operation == CMD_INSERT || operation == CMD_UPDATE)
    2235         710 :                     ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
    2236             :                                         subplan->targetlist);
    2237             : 
    2238        2086 :                 j = ExecInitJunkFilter(subplan->targetlist,
    2239        1043 :                                        resultRelInfo->ri_RelationDesc->rd_att->tdhasoid,
    2240             :                                        ExecInitExtraTupleSlot(estate));
    2241             : 
    2242        1043 :                 if (operation == CMD_UPDATE || operation == CMD_DELETE)
    2243             :                 {
    2244             :                     /* For UPDATE/DELETE, find the appropriate junk attr now */
    2245             :                     char        relkind;
    2246             : 
    2247        1043 :                     relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
    2248        1043 :                     if (relkind == RELKIND_RELATION ||
    2249          30 :                         relkind == RELKIND_MATVIEW ||
    2250             :                         relkind == RELKIND_PARTITIONED_TABLE)
    2251             :                     {
    2252        1013 :                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
    2253        2026 :                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
    2254           0 :                             elog(ERROR, "could not find junk ctid column");
    2255             :                     }
    2256          30 :                     else if (relkind == RELKIND_FOREIGN_TABLE)
    2257             :                     {
    2258             :                         /*
    2259             :                          * When there is a row-level trigger, there should be
    2260             :                          * a wholerow attribute.
    2261             :                          */
    2262           0 :                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
    2263             :                     }
    2264             :                     else
    2265             :                     {
    2266          30 :                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
    2267          30 :                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
    2268           0 :                             elog(ERROR, "could not find junk wholerow column");
    2269             :                     }
    2270             :                 }
    2271             : 
    2272        1043 :                 resultRelInfo->ri_junkFilter = j;
    2273        1043 :                 resultRelInfo++;
    2274             :             }
    2275             :         }
    2276             :         else
    2277             :         {
    2278        3557 :             if (operation == CMD_INSERT)
    2279        3557 :                 ExecCheckPlanOutput(mtstate->resultRelInfo->ri_RelationDesc,
    2280             :                                     subplan->targetlist);
    2281             :         }
    2282             :     }
    2283             : 
    2284             :     /*
    2285             :      * Set up a tuple table slot for use for trigger output tuples. In a plan
    2286             :      * containing multiple ModifyTable nodes, all can share one such slot, so
    2287             :      * we keep it in the estate.
    2288             :      */
    2289        4484 :     if (estate->es_trig_tuple_slot == NULL)
    2290        4470 :         estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate);
    2291             : 
    2292             :     /*
    2293             :      * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
    2294             :      * to estate->es_auxmodifytables so that it will be run to completion by
    2295             :      * ExecPostprocessPlan.  (It'd actually work fine to add the primary
    2296             :      * ModifyTable node too, but there's no need.)  Note the use of lcons not
    2297             :      * lappend: we need later-initialized ModifyTable nodes to be shut down
    2298             :      * before earlier ones.  This ensures that we don't throw away RETURNING
    2299             :      * rows that need to be seen by a later CTE subplan.
    2300             :      */
    2301        4484 :     if (!mtstate->canSetTag)
    2302         124 :         estate->es_auxmodifytables = lcons(mtstate,
    2303             :                                            estate->es_auxmodifytables);
    2304             : 
    2305        4484 :     return mtstate;
    2306             : }
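
The ON CONFLICT DO UPDATE and RETURNING machinery initialized above is easiest to see from the SQL side. Below is a minimal libpq sketch (not part of nodeModifyTable.c and not covered by this report) issuing an upsert whose plan passes through exactly this initialization: ExecInitModifyTable builds the UPDATE SET projection, the DO UPDATE WHERE qual, and the RETURNING projection for it. The connection parameters, table, and column names are illustrative assumptions.

/*
 * upsert_demo.c -- illustrative sketch only.
 * Build (paths may vary):
 *   cc upsert_demo.c -I$(pg_config --includedir) -L$(pg_config --libdir) -lpq
 */
#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

/* Run one statement, bailing out on any error. */
static PGresult *
run(PGconn *conn, const char *sql)
{
    PGresult   *res = PQexec(conn, sql);

    if (PQresultStatus(res) != PGRES_COMMAND_OK &&
        PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        fprintf(stderr, "%s", PQresultErrorMessage(res));
        PQclear(res);
        PQfinish(conn);
        exit(1);
    }
    return res;
}

int
main(void)
{
    PGconn     *conn = PQconnectdb("");     /* PGHOST/PGDATABASE/... from env */
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "%s", PQerrorMessage(conn));
        return 1;
    }

    PQclear(run(conn, "CREATE TEMP TABLE upsert_demo (k int PRIMARY KEY, v text)"));
    PQclear(run(conn, "INSERT INTO upsert_demo VALUES (1, 'old')"));

    /*
     * The primary key is the arbiter index; EXCLUDED is the tuple proposed
     * for insertion.  The WHERE clause becomes the DO UPDATE qual and
     * RETURNING drives the projection built in ExecInitModifyTable.
     */
    res = run(conn,
              "INSERT INTO upsert_demo AS d VALUES (1, 'new') "
              "ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v "
              "WHERE d.v IS DISTINCT FROM EXCLUDED.v "
              "RETURNING k, v");
    printf("returned: k=%s v=%s\n", PQgetvalue(res, 0, 0), PQgetvalue(res, 0, 1));
    PQclear(res);

    PQfinish(conn);
    return 0;
}

If the proposed row would not change anything, the DO UPDATE WHERE qual suppresses the update and no RETURNING row comes back for it.
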
    2307             : 
    2308             : /* ----------------------------------------------------------------
    2309             :  *      ExecEndModifyTable
    2310             :  *
    2311             :  *      Shuts down the plan.
    2312             :  *
    2313             :  *      Returns nothing of interest.
    2314             :  * ----------------------------------------------------------------
    2315             :  */
    2316             : void
    2317        4161 : ExecEndModifyTable(ModifyTableState *node)
    2318             : {
    2319             :     int         i;
    2320             : 
    2321             :     /* Free transition tables */
    2322        4161 :     if (node->mt_transition_capture != NULL)
    2323          42 :         DestroyTransitionCaptureState(node->mt_transition_capture);
    2324             : 
    2325             :     /*
    2326             :      * Allow any FDWs to shut down
    2327             :      */
    2328        8429 :     for (i = 0; i < node->mt_nplans; i++)
    2329             :     {
    2330        4268 :         ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
    2331             : 
    2332        8536 :         if (!resultRelInfo->ri_usesFdwDirectModify &&
    2333        4268 :             resultRelInfo->ri_FdwRoutine != NULL &&
    2334           0 :             resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
    2335           0 :             resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
    2336             :                                                            resultRelInfo);
    2337             :     }
    2338             : 
    2339             :     /*
    2340             :      * Close all the partitioned tables, leaf partitions, and their indices
    2341             :      *
    2342             :      * Remember node->mt_partition_dispatch_info[0] corresponds to the root
    2343             :      * partitioned table, which we must not try to close, because it is the
    2344             :      * main target table of the query that will be closed by ExecEndPlan().
    2345             :      * Also, tupslot is NULL for the root partitioned table.
    2346             :      */
    2347        4183 :     for (i = 1; i < node->mt_num_dispatch; i++)
    2348             :     {
    2349          22 :         PartitionDispatch pd = node->mt_partition_dispatch_info[i];
    2350             : 
    2351          22 :         heap_close(pd->reldesc, NoLock);
    2352          22 :         ExecDropSingleTupleTableSlot(pd->tupslot);
    2353             :     }
    2354        4328 :     for (i = 0; i < node->mt_num_partitions; i++)
    2355             :     {
    2356         167 :         ResultRelInfo *resultRelInfo = node->mt_partitions + i;
    2357             : 
    2358         167 :         ExecCloseIndices(resultRelInfo);
    2359         167 :         heap_close(resultRelInfo->ri_RelationDesc, NoLock);
    2360             :     }
    2361             : 
    2362             :     /* Release the standalone partition tuple descriptor, if any */
    2363        4161 :     if (node->mt_partition_tuple_slot)
    2364          41 :         ExecDropSingleTupleTableSlot(node->mt_partition_tuple_slot);
    2365             : 
    2366             :     /*
    2367             :      * Free the exprcontext
    2368             :      */
    2369        4161 :     ExecFreeExprContext(&node->ps);
    2370             : 
    2371             :     /*
    2372             :      * clean out the tuple table
    2373             :      */
    2374        4161 :     ExecClearTuple(node->ps.ps_ResultTupleSlot);
    2375             : 
    2376             :     /*
    2377             :      * Terminate EPQ execution if active
    2378             :      */
    2379        4161 :     EvalPlanQualEnd(&node->mt_epqstate);
    2380             : 
    2381             :     /*
    2382             :      * shut down subplans
    2383             :      */
    2384        8429 :     for (i = 0; i < node->mt_nplans; i++)
    2385        4268 :         ExecEndNode(node->mt_plans[i]);
    2386        4161 : }
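
The partition tuple-routing state that ExecInitModifyTable sets up for INSERTs into a partitioned table, and that ExecEndModifyTable just closed down, is exercised by statements like the following. This is a minimal libpq sketch under the same assumptions as the earlier one; the measurement tables follow the partitioning example in the PostgreSQL documentation and are not defined anywhere in this file.

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    /* Table and column names are illustrative assumptions. */
    static const char *const stmts[] = {
        "CREATE TABLE measurement (city_id int, logdate date)"
        " PARTITION BY RANGE (logdate)",
        "CREATE TABLE measurement_y2017 PARTITION OF measurement"
        " FOR VALUES FROM ('2017-01-01') TO ('2018-01-01')",
        /*
         * The INSERT targets the partitioned parent; ExecInitModifyTable
         * builds the partition dispatch info and a per-leaf RETURNING
         * projection, and the row is routed to measurement_y2017.
         */
        "INSERT INTO measurement VALUES (1, '2017-09-29')"
        " RETURNING city_id, logdate",
        "DROP TABLE measurement",
        NULL
    };
    PGconn     *conn = PQconnectdb("");     /* parameters from environment */
    int         i;

    if (PQstatus(conn) != CONNECTION_OK)
        return 1;
    for (i = 0; stmts[i] != NULL; i++)
    {
        PGresult   *res = PQexec(conn, stmts[i]);

        if (PQresultStatus(res) == PGRES_TUPLES_OK)
            printf("returned: %s, %s\n",
                   PQgetvalue(res, 0, 0), PQgetvalue(res, 0, 1));
        else if (PQresultStatus(res) != PGRES_COMMAND_OK)
            fprintf(stderr, "%s", PQresultErrorMessage(res));
        PQclear(res);
    }
    PQfinish(conn);
    return 0;
}

The RETURNING list is planned only against the parent; as the comments above note, the per-leaf projections are produced by remapping attribute numbers with map_partition_varattnos.
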
    2387             : 
    2388             : void
    2389           0 : ExecReScanModifyTable(ModifyTableState *node)
    2390             : {
    2391             :     /*
    2392             :      * Currently, we don't need to support rescan on ModifyTable nodes. The
    2393             :      * semantics of that would be a bit debatable anyway.
    2394             :      */
    2395           0 :     elog(ERROR, "ExecReScanModifyTable is not implemented");
    2396             : }
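
Finally, the es_auxmodifytables bookkeeping near the end of ExecInitModifyTable only matters when a single statement contains more than one ModifyTable node, which happens with data-modifying CTEs. A minimal libpq sketch with hypothetical tables:

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    /* Table and column names are illustrative assumptions. */
    static const char *const stmts[] = {
        "CREATE TEMP TABLE event_log (id int, logdate date)",
        "CREATE TEMP TABLE event_archive (id int, logdate date)",
        "INSERT INTO event_log VALUES (1, '2016-12-31'), (2, '2017-06-01')",
        /*
         * One statement, two ModifyTable nodes: the outer INSERT is the
         * primary (canSetTag) node, while the DELETE inside the CTE is
         * registered in estate->es_auxmodifytables so ExecPostprocessPlan
         * can run it to completion.
         */
        "WITH moved AS ("
        "  DELETE FROM event_log WHERE logdate < '2017-01-01' RETURNING *"
        ") INSERT INTO event_archive SELECT * FROM moved",
        "SELECT count(*) FROM event_archive",
        NULL
    };
    PGconn     *conn = PQconnectdb("");
    int         i;

    if (PQstatus(conn) != CONNECTION_OK)
        return 1;
    for (i = 0; stmts[i] != NULL; i++)
    {
        PGresult   *res = PQexec(conn, stmts[i]);

        if (PQresultStatus(res) == PGRES_TUPLES_OK)
            printf("archived rows: %s\n", PQgetvalue(res, 0, 0));
        else if (PQresultStatus(res) != PGRES_COMMAND_OK)
            fprintf(stderr, "%s", PQresultErrorMessage(res));
        PQclear(res);
    }
    PQfinish(conn);
    return 0;
}

As the comment in ExecInitModifyTable explains, the non-primary node is added with lcons so that later-initialized ModifyTable nodes are shut down first, preserving RETURNING rows needed by later CTE subplans.
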

Generated by: LCOV version 1.11