Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * execIndexing.c
4 : * routines for inserting index tuples and enforcing unique and
5 : * exclusion constraints.
6 : *
7 : * ExecInsertIndexTuples() is the main entry point. It's called after
8 : * inserting a tuple to the heap, and it inserts corresponding index tuples
9 : * into all indexes. At the same time, it enforces any unique and
10 : * exclusion constraints:
11 : *
12 : * Unique Indexes
13 : * --------------
14 : *
15 : * Enforcing a unique constraint is straightforward. When the index AM
16 : * inserts the tuple to the index, it also checks that there are no
17 : * conflicting tuples in the index already. It does so atomically, so that
18 : * even if two backends try to insert the same key concurrently, only one
19 : * of them will succeed. All the logic to ensure atomicity, and to wait
20 : * for in-progress transactions to finish, is handled by the index AM.
21 : *
22 : * If a unique constraint is deferred, we request the index AM to not
23 : * throw an error if a conflict is found. Instead, we make note that there
24 : * was a conflict and return the list of indexes with conflicts to the
25 : * caller. The caller must re-check them later, by calling index_insert()
26 : * with the UNIQUE_CHECK_EXISTING option.
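 *
 * As an illustration only (a sketch added for this write-up, with
 * 'indexRel', 'heapRel' and 'tuple' as placeholder names), the deferred
 * re-check amounts to re-running index_insert() for the already-inserted
 * tuple in UNIQUE_CHECK_EXISTING mode; the index AM itself raises the
 * duplicate-key error if the conflict turns out to be real:
 *
 *     index_insert(indexRel, values, isnull, &tuple->t_self,
 *                  heapRel, UNIQUE_CHECK_EXISTING, indexInfo);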
27 : *
28 : * Exclusion Constraints
29 : * ---------------------
30 : *
31 : * Exclusion constraints are different from unique indexes in that when the
32 : * tuple is inserted to the index, the index AM does not check for
33 : * duplicate keys at the same time. After the insertion, we perform a
34 : * separate scan on the index to check for conflicting tuples, and if one
35 : * is found, we throw an error and the transaction is aborted. If the
36 : * conflicting tuple's inserter or deleter is in-progress, we wait for it
37 : * to finish first.
38 : *
39 : * There is a chance of deadlock, if two backends insert a tuple at the
40 : * same time, and then perform the scan to check for conflicts. They will
41 : * find each other's tuple, and both try to wait for each other. The
42 : * deadlock detector will detect that, and abort one of the transactions.
43 : * That's fairly harmless, as one of them was bound to abort with a
44 : * "duplicate key error" anyway, although you get a different error
45 : * message.
46 : *
47 : * If an exclusion constraint is deferred, we still perform the conflict
48 : * checking scan immediately after inserting the index tuple. But instead
49 : * of throwing an error if a conflict is found, we return that information
50 : * to the caller. The caller must re-check the constraint later by calling
51 : * check_exclusion_constraint().
52 : *
53 : * Speculative insertion
54 : * ---------------------
55 : *
56 : * Speculative insertion is a two-phase mechanism used to implement
57 : * INSERT ... ON CONFLICT DO UPDATE/NOTHING. The tuple is first inserted
58 : * into the heap and the indexes are updated as usual, but if a constraint is
59 : * violated, we can still back out the insertion without aborting the whole
60 : * transaction. In an INSERT ... ON CONFLICT statement, if a conflict is
61 : * detected, the inserted tuple is backed out and the ON CONFLICT action is
62 : * executed instead.
63 : *
64 : * Insertion to a unique index works as usual: the index AM checks for
65 : * duplicate keys atomically with the insertion. But instead of throwing
66 : * an error on a conflict, the speculatively inserted heap tuple is backed
67 : * out.
68 : *
69 : * Exclusion constraints are slightly more complicated. As mentioned
70 : * earlier, there is a risk of deadlock when two backends insert the same
71 : * key concurrently. That is not a problem for regular insertions, where
72 : * one of the transactions has to be aborted anyway, but with a speculative
73 : * insertion we cannot let a deadlock happen, because we only want to back
74 : * out the speculatively inserted tuple on conflict, not abort the whole
75 : * transaction.
76 : *
77 : * When a backend detects that the speculative insertion conflicts with
78 : * another in-progress tuple, it has two options:
79 : *
80 : * 1. back out the speculatively inserted tuple, then wait for the other
81 : * transaction, and retry. Or,
82 : * 2. wait for the other transaction, with the speculatively inserted tuple
83 : * still in place.
84 : *
85 : * If two backends insert at the same time, and both try to wait for each
86 : * other, they will deadlock. So option 2 is not acceptable. Option 1
87 : * avoids the deadlock, but it is prone to a livelock instead. Both
88 : * transactions will wake up immediately as the other transaction backs
89 : * out. Then they both retry, and conflict with each other again, lather,
90 : * rinse, repeat.
91 : *
92 : * To avoid the livelock, one of the backends must back out first, and then
93 : * wait, while the other one waits without backing out. It doesn't matter
94 : * which one backs out, so we employ an arbitrary rule that the transaction
95 : * with the higher XID backs out.
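 *
 * As an illustrative sketch (not part of the original rule's wording, with
 * 'other_xid' standing for the conflicting inserter's XID), the decision
 * can be phrased with TransactionIdPrecedes():
 *
 *     if (TransactionIdPrecedes(GetCurrentTransactionId(), other_xid))
 *         ... our XID is older: wait with our speculative tuple in place
 *     else
 *         ... our XID is higher: back out our tuple first, then wait
 *
 * which matches the CEOUC_LIVELOCK_PREVENTING_WAIT handling further down
 * in this file.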
96 : *
97 : *
98 : * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
99 : * Portions Copyright (c) 1994, Regents of the University of California
100 : *
101 : *
102 : * IDENTIFICATION
103 : * src/backend/executor/execIndexing.c
104 : *
105 : *-------------------------------------------------------------------------
106 : */
107 : #include "postgres.h"
108 :
109 : #include "access/relscan.h"
110 : #include "access/xact.h"
111 : #include "catalog/index.h"
112 : #include "executor/executor.h"
113 : #include "nodes/nodeFuncs.h"
114 : #include "storage/lmgr.h"
115 : #include "utils/tqual.h"
116 :
117 : /* waitMode argument to check_exclusion_or_unique_constraint() */
118 : typedef enum
119 : {
120 : CEOUC_WAIT,
121 : CEOUC_NOWAIT,
122 : CEOUC_LIVELOCK_PREVENTING_WAIT
123 : } CEOUC_WAIT_MODE;
124 :
125 : static bool check_exclusion_or_unique_constraint(Relation heap, Relation index,
126 : IndexInfo *indexInfo,
127 : ItemPointer tupleid,
128 : Datum *values, bool *isnull,
129 : EState *estate, bool newIndex,
130 : CEOUC_WAIT_MODE waitMode,
131 : bool errorOK,
132 : ItemPointer conflictTid);
133 :
134 : static bool index_recheck_constraint(Relation index, Oid *constr_procs,
135 : Datum *existing_values, bool *existing_isnull,
136 : Datum *new_values);
137 :
138 : /* ----------------------------------------------------------------
139 : * ExecOpenIndices
140 : *
141 : * Find the indices associated with a result relation, open them,
142 : * and save information about them in the result ResultRelInfo.
143 : *
144 : * At entry, caller has already opened and locked
145 : * resultRelInfo->ri_RelationDesc.
146 : * ----------------------------------------------------------------
147 : */
148 : void
149 45641 : ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)
150 : {
151 45641 : Relation resultRelation = resultRelInfo->ri_RelationDesc;
152 : List *indexoidlist;
153 : ListCell *l;
154 : int len,
155 : i;
156 : RelationPtr relationDescs;
157 : IndexInfo **indexInfoArray;
158 :
159 45641 : resultRelInfo->ri_NumIndices = 0;
160 :
161 : /* fast path if no indexes */
162 45641 : if (!RelationGetForm(resultRelation)->relhasindex)
163 689 : return;
164 :
165 : /*
166 : * Get cached list of index OIDs
167 : */
168 44952 : indexoidlist = RelationGetIndexList(resultRelation);
169 44952 : len = list_length(indexoidlist);
170 44952 : if (len == 0)
171 16 : return;
172 :
173 : /*
174 : * allocate space for result arrays
175 : */
176 44936 : relationDescs = (RelationPtr) palloc(len * sizeof(Relation));
177 44936 : indexInfoArray = (IndexInfo **) palloc(len * sizeof(IndexInfo *));
178 :
179 44936 : resultRelInfo->ri_NumIndices = len;
180 44936 : resultRelInfo->ri_IndexRelationDescs = relationDescs;
181 44936 : resultRelInfo->ri_IndexRelationInfo = indexInfoArray;
182 :
183 : /*
184 : * For each index, open the index relation and save pg_index info. We
185 : * acquire RowExclusiveLock, signifying we will update the index.
186 : *
187 : * Note: we do this even if the index is not IndexIsReady; it's not worth
188 : * the trouble to optimize for the case where it isn't.
189 : */
190 44936 : i = 0;
191 141087 : foreach(l, indexoidlist)
192 : {
193 96151 : Oid indexOid = lfirst_oid(l);
194 : Relation indexDesc;
195 : IndexInfo *ii;
196 :
197 96151 : indexDesc = index_open(indexOid, RowExclusiveLock);
198 :
199 : /* extract index key information from the index's pg_index info */
200 96151 : ii = BuildIndexInfo(indexDesc);
201 :
202 : /*
203 : * If the indexes are to be used for speculative insertion, add extra
204 : * information required by unique index entries.
205 : */
206 96151 : if (speculative && ii->ii_Unique)
207 175 : BuildSpeculativeIndexInfo(indexDesc, ii);
208 :
209 96151 : relationDescs[i] = indexDesc;
210 96151 : indexInfoArray[i] = ii;
211 96151 : i++;
212 : }
213 :
214 44936 : list_free(indexoidlist);
215 : }
216 :
217 : /* ----------------------------------------------------------------
218 : * ExecCloseIndices
219 : *
220 : * Close the index relations stored in resultRelInfo
221 : * ----------------------------------------------------------------
222 : */
223 : void
224 48838 : ExecCloseIndices(ResultRelInfo *resultRelInfo)
225 : {
226 : int i;
227 : int numIndices;
228 : RelationPtr indexDescs;
229 :
230 48838 : numIndices = resultRelInfo->ri_NumIndices;
231 48838 : indexDescs = resultRelInfo->ri_IndexRelationDescs;
232 :
233 144855 : for (i = 0; i < numIndices; i++)
234 : {
235 96017 : if (indexDescs[i] == NULL)
236 0 : continue; /* shouldn't happen? */
237 :
238 : /* Drop lock acquired by ExecOpenIndices */
239 96017 : index_close(indexDescs[i], RowExclusiveLock);
240 : }
241 :
242 : /*
243 : * XXX should free indexInfo array here too? Currently we assume that
244 : * such stuff will be cleaned up automatically in FreeExecutorState.
245 : */
246 48838 : }
247 :
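/*
 * Illustrative sketch only (not part of this file's real code): a caller
 * brackets its index maintenance with the two routines above.  Passing
 * true for 'speculative' (as done for INSERT ... ON CONFLICT) additionally
 * runs BuildSpeculativeIndexInfo() for each unique index.  It is assumed
 * that resultRelInfo->ri_RelationDesc has already been opened and locked.
 */
static void
sketch_index_open_close(ResultRelInfo *resultRelInfo, bool speculative)
{
	ExecOpenIndices(resultRelInfo, speculative);

	/* ... run the DML that inserts or updates tuples here ... */

	ExecCloseIndices(resultRelInfo);
}
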
248 : /* ----------------------------------------------------------------
249 : * ExecInsertIndexTuples
250 : *
251 : * This routine takes care of inserting index tuples
252 : * into all the relations indexing the result relation
253 : * when a heap tuple is inserted into the result relation.
254 : *
255 : * Unique and exclusion constraints are enforced at the same
256 : * time. This returns a list of index OIDs for any unique or
257 : * exclusion constraints that are deferred and that had
258 : * potential (unconfirmed) conflicts. (If noDupErr == true,
259 : * the same is done for non-deferred constraints, and *specConflict
260 : * reports whether the conflict was speculative rather than deferred.)
261 : *
262 : * If 'arbiterIndexes' is nonempty, noDupErr applies only to
263 : * those indexes. NIL means noDupErr applies to all indexes.
264 : *
265 : * CAUTION: this must not be called for a HOT update.
266 : * We can't defend against that here for lack of info.
267 : * Should we change the API to make it safer?
268 : * ----------------------------------------------------------------
269 : */
270 : List *
271 167647 : ExecInsertIndexTuples(TupleTableSlot *slot,
272 : ItemPointer tupleid,
273 : EState *estate,
274 : bool noDupErr,
275 : bool *specConflict,
276 : List *arbiterIndexes)
277 : {
278 167647 : List *result = NIL;
279 : ResultRelInfo *resultRelInfo;
280 : int i;
281 : int numIndices;
282 : RelationPtr relationDescs;
283 : Relation heapRelation;
284 : IndexInfo **indexInfoArray;
285 : ExprContext *econtext;
286 : Datum values[INDEX_MAX_KEYS];
287 : bool isnull[INDEX_MAX_KEYS];
288 :
289 : /*
290 : * Get information from the result relation info structure.
291 : */
292 167647 : resultRelInfo = estate->es_result_relation_info;
293 167647 : numIndices = resultRelInfo->ri_NumIndices;
294 167647 : relationDescs = resultRelInfo->ri_IndexRelationDescs;
295 167647 : indexInfoArray = resultRelInfo->ri_IndexRelationInfo;
296 167647 : heapRelation = resultRelInfo->ri_RelationDesc;
297 :
298 : /*
299 : * We will use the EState's per-tuple context for evaluating predicates
300 : * and index expressions (creating it if it's not already there).
301 : */
302 167647 : econtext = GetPerTupleExprContext(estate);
303 :
304 : /* Arrange for econtext's scan tuple to be the tuple under test */
305 167647 : econtext->ecxt_scantuple = slot;
306 :
307 : /*
308 : * for each index, form and insert the index tuple
309 : */
310 363376 : for (i = 0; i < numIndices; i++)
311 : {
312 195761 : Relation indexRelation = relationDescs[i];
313 : IndexInfo *indexInfo;
314 : bool applyNoDupErr;
315 : IndexUniqueCheck checkUnique;
316 : bool satisfiesConstraint;
317 :
318 195761 : if (indexRelation == NULL)
319 0 : continue;
320 :
321 195761 : indexInfo = indexInfoArray[i];
322 :
323 : /* If the index is marked as read-only, ignore it */
324 195761 : if (!indexInfo->ii_ReadyForInserts)
325 0 : continue;
326 :
327 : /* Check for partial index */
328 195761 : if (indexInfo->ii_Predicate != NIL)
329 : {
330 : ExprState *predicate;
331 :
332 : /*
333 : * If predicate state not set up yet, create it (in the estate's
334 : * per-query context)
335 : */
336 3014 : predicate = indexInfo->ii_PredicateState;
337 3014 : if (predicate == NULL)
338 : {
339 15 : predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
340 15 : indexInfo->ii_PredicateState = predicate;
341 : }
342 :
343 : /* Skip this index-update if the predicate isn't satisfied */
344 3014 : if (!ExecQual(predicate, econtext))
345 2887 : continue;
346 : }
347 :
348 : /*
349 : * FormIndexDatum fills in its values and isnull parameters with the
350 : * appropriate values for the column(s) of the index.
351 : */
352 192874 : FormIndexDatum(indexInfo,
353 : slot,
354 : estate,
355 : values,
356 : isnull);
357 :
358 : /* Check whether to apply noDupErr to this index */
359 192929 : applyNoDupErr = noDupErr &&
360 57 : (arbiterIndexes == NIL ||
361 57 : list_member_oid(arbiterIndexes,
362 57 : indexRelation->rd_index->indexrelid));
363 :
364 : /*
365 : * The index AM does the actual insertion, plus uniqueness checking.
366 : *
367 : * For an immediate-mode unique index, we just tell the index AM to
368 : * throw error if not unique.
369 : *
370 : * For a deferrable unique index, we tell the index AM to just detect
371 : * possible non-uniqueness, and we add the index OID to the result
372 : * list if further checking is needed.
373 : *
374 : * For a speculative insertion (used by INSERT ... ON CONFLICT), do
375 : * the same as for a deferrable unique index.
376 : */
377 192874 : if (!indexRelation->rd_index->indisunique)
378 162362 : checkUnique = UNIQUE_CHECK_NO;
379 30512 : else if (applyNoDupErr)
380 55 : checkUnique = UNIQUE_CHECK_PARTIAL;
381 30457 : else if (indexRelation->rd_index->indimmediate)
382 30436 : checkUnique = UNIQUE_CHECK_YES;
383 : else
384 21 : checkUnique = UNIQUE_CHECK_PARTIAL;
385 :
386 192874 : satisfiesConstraint =
387 : index_insert(indexRelation, /* index relation */
388 : values, /* array of index Datums */
389 : isnull, /* null flags */
390 : tupleid, /* tid of heap tuple */
391 : heapRelation, /* heap relation */
392 : checkUnique, /* type of uniqueness check to do */
393 : indexInfo); /* index AM may need this */
394 :
395 : /*
396 : * If the index has an associated exclusion constraint, check that.
397 : * This is simpler than the process for uniqueness checks since we
398 : * always insert first and then check. If the constraint is deferred,
399 : * we check now anyway, but don't throw error on violation or wait for
400 : * a conclusive outcome from a concurrent insertion; instead we'll
401 : * queue a recheck event. Similarly, noDupErr callers (speculative
402 : * inserters) will recheck later, and wait for a conclusive outcome
403 : * then.
404 : *
405 : * An index for an exclusion constraint can't also be UNIQUE (not an
406 : * essential property, we just don't allow it in the grammar), so no
407 : * need to preserve the prior state of satisfiesConstraint.
408 : */
409 192847 : if (indexInfo->ii_ExclusionOps != NULL)
410 : {
411 : bool violationOK;
412 : CEOUC_WAIT_MODE waitMode;
413 :
414 23 : if (applyNoDupErr)
415 : {
416 0 : violationOK = true;
417 0 : waitMode = CEOUC_LIVELOCK_PREVENTING_WAIT;
418 : }
419 23 : else if (!indexRelation->rd_index->indimmediate)
420 : {
421 7 : violationOK = true;
422 7 : waitMode = CEOUC_NOWAIT;
423 : }
424 : else
425 : {
426 16 : violationOK = false;
427 16 : waitMode = CEOUC_WAIT;
428 : }
429 :
430 23 : satisfiesConstraint =
431 23 : check_exclusion_or_unique_constraint(heapRelation,
432 : indexRelation, indexInfo,
433 : tupleid, values, isnull,
434 : estate, false,
435 : waitMode, violationOK, NULL);
436 : }
437 :
438 385608 : if ((checkUnique == UNIQUE_CHECK_PARTIAL ||
439 192860 : indexInfo->ii_ExclusionOps != NULL) &&
440 : !satisfiesConstraint)
441 : {
442 : /*
443 : * The tuple potentially violates the uniqueness or exclusion
444 : * constraint, so make a note of the index so that we can re-check
445 : * it later. Speculative inserters are told if there was a
446 : * speculative conflict, since that always requires a restart.
447 : */
448 18 : result = lappend_oid(result, RelationGetRelid(indexRelation));
449 18 : if (indexRelation->rd_index->indimmediate && specConflict)
450 0 : *specConflict = true;
451 : }
452 : }
453 :
454 167615 : return result;
455 : }
456 :
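/*
 * Illustrative sketch only (not part of this file's real code): roughly how
 * a caller such as ExecInsert() in nodeModifyTable.c uses
 * ExecInsertIndexTuples() after it has written the heap tuple.  'estate',
 * 'slot' and 'tupleid' are assumed to be supplied by the caller; triggers,
 * ON CONFLICT handling and error paths are omitted.
 */
static void
sketch_insert_index_entries(EState *estate, TupleTableSlot *slot,
							ItemPointer tupleid)
{
	List	   *recheckIndexes;

	recheckIndexes = ExecInsertIndexTuples(slot, tupleid, estate,
										   false,	/* noDupErr */
										   NULL,	/* specConflict */
										   NIL);	/* arbiterIndexes */

	/*
	 * Any OIDs returned identify deferred unique or exclusion constraints
	 * with potential (unconfirmed) conflicts; the real caller hands this
	 * list to the after-row trigger machinery so they get rechecked later.
	 */
	list_free(recheckIndexes);
}
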
457 : /* ----------------------------------------------------------------
458 : * ExecCheckIndexConstraints
459 : *
460 : * This routine checks if a tuple violates any unique or
461 : * exclusion constraints. Returns true if there is no conflict.
462 : * Otherwise returns false, and the TID of the conflicting
463 : * tuple is returned in *conflictTid.
464 : *
465 : * If 'arbiterIndexes' is given, only those indexes are checked.
466 : * NIL means all indexes.
467 : *
468 : * Note that this doesn't lock the values in any way, so it's
469 : * possible that a conflicting tuple is inserted immediately
470 : * after this returns. But this can be used for a pre-check
471 : * before insertion.
472 : * ----------------------------------------------------------------
473 : */
474 : bool
475 144 : ExecCheckIndexConstraints(TupleTableSlot *slot,
476 : EState *estate, ItemPointer conflictTid,
477 : List *arbiterIndexes)
478 : {
479 : ResultRelInfo *resultRelInfo;
480 : int i;
481 : int numIndices;
482 : RelationPtr relationDescs;
483 : Relation heapRelation;
484 : IndexInfo **indexInfoArray;
485 : ExprContext *econtext;
486 : Datum values[INDEX_MAX_KEYS];
487 : bool isnull[INDEX_MAX_KEYS];
488 : ItemPointerData invalidItemPtr;
489 144 : bool checkedIndex = false;
490 :
491 144 : ItemPointerSetInvalid(conflictTid);
492 144 : ItemPointerSetInvalid(&invalidItemPtr);
493 :
494 : /*
495 : * Get information from the result relation info structure.
496 : */
497 144 : resultRelInfo = estate->es_result_relation_info;
498 144 : numIndices = resultRelInfo->ri_NumIndices;
499 144 : relationDescs = resultRelInfo->ri_IndexRelationDescs;
500 144 : indexInfoArray = resultRelInfo->ri_IndexRelationInfo;
501 144 : heapRelation = resultRelInfo->ri_RelationDesc;
502 :
503 : /*
504 : * We will use the EState's per-tuple context for evaluating predicates
505 : * and index expressions (creating it if it's not already there).
506 : */
507 144 : econtext = GetPerTupleExprContext(estate);
508 :
509 : /* Arrange for econtext's scan tuple to be the tuple under test */
510 144 : econtext->ecxt_scantuple = slot;
511 :
512 : /*
513 : * For each index, form index tuple and check if it satisfies the
514 : * constraint.
515 : */
516 209 : for (i = 0; i < numIndices; i++)
517 : {
518 154 : Relation indexRelation = relationDescs[i];
519 : IndexInfo *indexInfo;
520 : bool satisfiesConstraint;
521 :
522 154 : if (indexRelation == NULL)
523 0 : continue;
524 :
525 154 : indexInfo = indexInfoArray[i];
526 :
527 154 : if (!indexInfo->ii_Unique && !indexInfo->ii_ExclusionOps)
528 0 : continue;
529 :
530 : /* If the index is marked as read-only, ignore it */
531 154 : if (!indexInfo->ii_ReadyForInserts)
532 0 : continue;
533 :
534 : /* When specific arbiter indexes requested, only examine them */
535 299 : if (arbiterIndexes != NIL &&
536 145 : !list_member_oid(arbiterIndexes,
537 145 : indexRelation->rd_index->indexrelid))
538 10 : continue;
539 :
540 144 : if (!indexRelation->rd_index->indimmediate)
541 1 : ereport(ERROR,
542 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
543 : errmsg("ON CONFLICT does not support deferrable unique constraints/exclusion constraints as arbiters"),
544 : errtableconstraint(heapRelation,
545 : RelationGetRelationName(indexRelation))));
546 :
547 143 : checkedIndex = true;
548 :
549 : /* Check for partial index */
550 143 : if (indexInfo->ii_Predicate != NIL)
551 : {
552 : ExprState *predicate;
553 :
554 : /*
555 : * If predicate state not set up yet, create it (in the estate's
556 : * per-query context)
557 : */
558 5 : predicate = indexInfo->ii_PredicateState;
559 5 : if (predicate == NULL)
560 : {
561 5 : predicate = ExecPrepareQual(indexInfo->ii_Predicate, estate);
562 5 : indexInfo->ii_PredicateState = predicate;
563 : }
564 :
565 : /* Skip this index-update if the predicate isn't satisfied */
566 5 : if (!ExecQual(predicate, econtext))
567 0 : continue;
568 : }
569 :
570 : /*
571 : * FormIndexDatum fills in its values and isnull parameters with the
572 : * appropriate values for the column(s) of the index.
573 : */
574 143 : FormIndexDatum(indexInfo,
575 : slot,
576 : estate,
577 : values,
578 : isnull);
579 :
580 143 : satisfiesConstraint =
581 : check_exclusion_or_unique_constraint(heapRelation, indexRelation,
582 : indexInfo, &invalidItemPtr,
583 : values, isnull, estate, false,
584 : CEOUC_WAIT, true,
585 : conflictTid);
586 143 : if (!satisfiesConstraint)
587 88 : return false;
588 : }
589 :
590 55 : if (arbiterIndexes != NIL && !checkedIndex)
591 0 : elog(ERROR, "unexpected failure to find arbiter index");
592 :
593 55 : return true;
594 : }
595 :
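/*
 * Illustrative sketch only (not part of this file's real code): roughly how
 * the speculative-insertion path (cf. ExecInsert()'s ON CONFLICT handling in
 * nodeModifyTable.c) uses the pre-check above.  Locking the conflicting
 * tuple for DO UPDATE and the retry loop are omitted; 'estate', 'slot' and
 * 'arbiterIndexes' are assumed to come from the caller.
 */
static bool
sketch_on_conflict_precheck(EState *estate, TupleTableSlot *slot,
							List *arbiterIndexes)
{
	ItemPointerData conflictTid;

	if (!ExecCheckIndexConstraints(slot, estate, &conflictTid, arbiterIndexes))
	{
		/*
		 * A (possible) conflict exists at conflictTid: DO NOTHING simply
		 * skips the new row, DO UPDATE locks and updates that tuple instead
		 * of inserting.
		 */
		return false;
	}

	/* No conflict found; proceed with the speculative heap insertion. */
	return true;
}
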
596 : /*
597 : * Check for violation of an exclusion or unique constraint
598 : *
599 : * heap: the table containing the new tuple
600 : * index: the index supporting the constraint
601 : * indexInfo: info about the index, including the exclusion properties
602 : * tupleid: heap TID of the new tuple we have just inserted (invalid if we
603 : * haven't inserted a new tuple yet)
604 : * values, isnull: the *index* column values computed for the new tuple
605 : * estate: an EState we can do evaluation in
606 : * newIndex: if true, we are trying to build a new index (this affects
607 : * only the wording of error messages)
608 : * waitMode: whether to wait for concurrent inserters/deleters
609 : * violationOK: if true, don't throw error for violation
610 : * conflictTid: if not-NULL, the TID of the conflicting tuple is returned here
611 : *
612 : * Returns true if OK, false if actual or potential violation
613 : *
614 : * 'waitMode' determines what happens if a conflict is detected with a tuple
615 : * that was inserted or deleted by a transaction that's still running.
616 : * CEOUC_WAIT means that we wait for the transaction to finish, before
617 : * throwing an error or returning. CEOUC_NOWAIT means that we report the
618 : * violation immediately; so the violation is only potential, and the caller
619 : * must recheck sometime later. This behavior is convenient for deferred
620 : * exclusion checks; we need not bother queuing a deferred event if there is
621 : * definitely no conflict at insertion time.
622 : *
623 : * CEOUC_LIVELOCK_PREVENTING_WAIT is like CEOUC_NOWAIT, but we will sometimes
624 : * wait anyway, to prevent livelocking if two transactions try inserting at
625 : * the same time. This is used with speculative insertions, for INSERT ON
626 : * CONFLICT statements. (See notes in file header)
627 : *
628 : * If violationOK is true, we just report the potential or actual violation to
629 : * the caller by returning 'false'. Otherwise we throw a descriptive error
630 : * message here. When violationOK is false, a false result is impossible.
631 : *
632 : * Note: The index AM is normally responsible for checking unique constraints,
633 : * so this normally only needs to be used for exclusion constraints. But this
634 : * function is also called when doing a "pre-check" for conflicts on a unique
635 : * constraint during speculative insertion. Caller may use the returned
636 : * conflict TID to take further steps.
637 : */
638 : static bool
639 175 : check_exclusion_or_unique_constraint(Relation heap, Relation index,
640 : IndexInfo *indexInfo,
641 : ItemPointer tupleid,
642 : Datum *values, bool *isnull,
643 : EState *estate, bool newIndex,
644 : CEOUC_WAIT_MODE waitMode,
645 : bool violationOK,
646 : ItemPointer conflictTid)
647 : {
648 : Oid *constr_procs;
649 : uint16 *constr_strats;
650 175 : Oid *index_collations = index->rd_indcollation;
651 175 : int index_natts = index->rd_index->indnatts;
652 : IndexScanDesc index_scan;
653 : HeapTuple tup;
654 : ScanKeyData scankeys[INDEX_MAX_KEYS];
655 : SnapshotData DirtySnapshot;
656 : int i;
657 : bool conflict;
658 : bool found_self;
659 : ExprContext *econtext;
660 : TupleTableSlot *existing_slot;
661 : TupleTableSlot *save_scantuple;
662 :
663 175 : if (indexInfo->ii_ExclusionOps)
664 : {
665 34 : constr_procs = indexInfo->ii_ExclusionProcs;
666 34 : constr_strats = indexInfo->ii_ExclusionStrats;
667 : }
668 : else
669 : {
670 141 : constr_procs = indexInfo->ii_UniqueProcs;
671 141 : constr_strats = indexInfo->ii_UniqueStrats;
672 : }
673 :
674 : /*
675 : * If any of the input values are NULL, the constraint check is assumed to
676 : * pass (i.e., we assume the operators are strict).
677 : */
678 376 : for (i = 0; i < index_natts; i++)
679 : {
680 201 : if (isnull[i])
681 0 : return true;
682 : }
683 :
684 : /*
685 : * Search the tuples that are in the index for any violations, including
686 : * tuples that aren't visible yet.
687 : */
688 175 : InitDirtySnapshot(DirtySnapshot);
689 :
690 376 : for (i = 0; i < index_natts; i++)
691 : {
692 804 : ScanKeyEntryInitialize(&scankeys[i],
693 : 0,
694 : i + 1,
695 201 : constr_strats[i],
696 : InvalidOid,
697 201 : index_collations[i],
698 201 : constr_procs[i],
699 201 : values[i]);
700 : }
701 :
702 : /*
703 : * Need a TupleTableSlot to put existing tuples in.
704 : *
705 : * To use FormIndexDatum, we have to make the econtext's scantuple point
706 : * to this slot. Be sure to save and restore caller's value for
707 : * scantuple.
708 : */
709 175 : existing_slot = MakeSingleTupleTableSlot(RelationGetDescr(heap));
710 :
711 175 : econtext = GetPerTupleExprContext(estate);
712 175 : save_scantuple = econtext->ecxt_scantuple;
713 175 : econtext->ecxt_scantuple = existing_slot;
714 :
715 : /*
716 : * May have to restart scan from this point if a potential conflict is
717 : * found.
718 : */
719 : retry:
720 175 : conflict = false;
721 175 : found_self = false;
722 175 : index_scan = index_beginscan(heap, index, &DirtySnapshot, index_natts, 0);
723 175 : index_rescan(index_scan, scankeys, index_natts, NULL, 0);
724 :
725 386 : while ((tup = index_getnext(index_scan,
726 : ForwardScanDirection)) != NULL)
727 : {
728 : TransactionId xwait;
729 : ItemPointerData ctid_wait;
730 : XLTW_Oper reason_wait;
731 : Datum existing_values[INDEX_MAX_KEYS];
732 : bool existing_isnull[INDEX_MAX_KEYS];
733 : char *error_new;
734 : char *error_existing;
735 :
736 : /*
737 : * Ignore the entry for the tuple we're trying to check.
738 : */
739 188 : if (ItemPointerIsValid(tupleid) &&
740 50 : ItemPointerEquals(tupleid, &tup->t_self))
741 : {
742 27 : if (found_self) /* should not happen */
743 0 : elog(ERROR, "found self tuple multiple times in index \"%s\"",
744 : RelationGetRelationName(index));
745 27 : found_self = true;
746 63 : continue;
747 : }
748 :
749 : /*
750 : * Extract the index column values and isnull flags from the existing
751 : * tuple.
752 : */
753 111 : ExecStoreTuple(tup, existing_slot, InvalidBuffer, false);
754 111 : FormIndexDatum(indexInfo, existing_slot, estate,
755 : existing_values, existing_isnull);
756 :
757 : /* If lossy indexscan, must recheck the condition */
758 111 : if (index_scan->xs_recheck)
759 : {
760 12 : if (!index_recheck_constraint(index,
761 : constr_procs,
762 : existing_values,
763 : existing_isnull,
764 : values))
765 9 : continue; /* tuple doesn't actually match, so no
766 : * conflict */
767 : }
768 :
769 : /*
770 : * At this point we have either a conflict or a potential conflict.
771 : *
772 : * If an in-progress transaction is affecting the visibility of this
773 : * tuple, we need to wait for it to complete and then recheck (unless
774 : * the caller requested not to). For simplicity we do rechecking by
775 : * just restarting the whole scan --- this case probably doesn't
776 : * happen often enough to be worth trying harder, and anyway we don't
777 : * want to hold any index internal locks while waiting.
778 : */
779 204 : xwait = TransactionIdIsValid(DirtySnapshot.xmin) ?
780 102 : DirtySnapshot.xmin : DirtySnapshot.xmax;
781 :
782 102 : if (TransactionIdIsValid(xwait) &&
783 0 : (waitMode == CEOUC_WAIT ||
784 0 : (waitMode == CEOUC_LIVELOCK_PREVENTING_WAIT &&
785 0 : DirtySnapshot.speculativeToken &&
786 0 : TransactionIdPrecedes(GetCurrentTransactionId(), xwait))))
787 : {
788 0 : ctid_wait = tup->t_data->t_ctid;
789 0 : reason_wait = indexInfo->ii_ExclusionOps ?
790 : XLTW_RecheckExclusionConstr : XLTW_InsertIndex;
791 0 : index_endscan(index_scan);
792 0 : if (DirtySnapshot.speculativeToken)
793 0 : SpeculativeInsertionWait(DirtySnapshot.xmin,
794 : DirtySnapshot.speculativeToken);
795 : else
796 0 : XactLockTableWait(xwait, heap, &ctid_wait, reason_wait);
797 0 : goto retry;
798 : }
799 :
800 : /*
801 : * We have a definite conflict (or a potential one, but the caller
802 : * didn't want to wait). Return it to caller, or report it.
803 : */
804 102 : if (violationOK)
805 : {
806 92 : conflict = true;
807 92 : if (conflictTid)
808 88 : *conflictTid = tup->t_self;
809 92 : break;
810 : }
811 :
812 10 : error_new = BuildIndexValueDescription(index, values, isnull);
813 10 : error_existing = BuildIndexValueDescription(index, existing_values,
814 : existing_isnull);
815 10 : if (newIndex)
816 2 : ereport(ERROR,
817 : (errcode(ERRCODE_EXCLUSION_VIOLATION),
818 : errmsg("could not create exclusion constraint \"%s\"",
819 : RelationGetRelationName(index)),
820 : error_new && error_existing ?
821 : errdetail("Key %s conflicts with key %s.",
822 : error_new, error_existing) :
823 : errdetail("Key conflicts exist."),
824 : errtableconstraint(heap,
825 : RelationGetRelationName(index))));
826 : else
827 8 : ereport(ERROR,
828 : (errcode(ERRCODE_EXCLUSION_VIOLATION),
829 : errmsg("conflicting key value violates exclusion constraint \"%s\"",
830 : RelationGetRelationName(index)),
831 : error_new && error_existing ?
832 : errdetail("Key %s conflicts with existing key %s.",
833 : error_new, error_existing) :
834 : errdetail("Key conflicts with existing key."),
835 : errtableconstraint(heap,
836 : RelationGetRelationName(index))));
837 : }
838 :
839 165 : index_endscan(index_scan);
840 :
841 : /*
842 : * Ordinarily, at this point the search should have found the originally
843 : * inserted tuple (if any), unless we exited the loop early because of
844 : * conflict. However, it is possible to define exclusion constraints for
845 : * which that wouldn't be true --- for instance, if the operator is <>. So
846 : * we no longer complain if found_self is still false.
847 : */
848 :
849 165 : econtext->ecxt_scantuple = save_scantuple;
850 :
851 165 : ExecDropSingleTupleTableSlot(existing_slot);
852 :
853 165 : return !conflict;
854 : }
855 :
856 : /*
857 : * Check for violation of an exclusion constraint
858 : *
859 : * This is a dumbed down version of check_exclusion_or_unique_constraint
860 : * for external callers. They don't need all the special modes.
861 : */
862 : void
863 9 : check_exclusion_constraint(Relation heap, Relation index,
864 : IndexInfo *indexInfo,
865 : ItemPointer tupleid,
866 : Datum *values, bool *isnull,
867 : EState *estate, bool newIndex)
868 : {
869 9 : (void) check_exclusion_or_unique_constraint(heap, index, indexInfo, tupleid,
870 : values, isnull,
871 : estate, newIndex,
872 : CEOUC_WAIT, false, NULL);
873 4 : }
874 :
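/*
 * Illustrative sketch only (not part of this file's real code): roughly how
 * a deferred exclusion-constraint recheck (cf. unique_key_recheck() in
 * commands/constraint.c) could drive check_exclusion_constraint() for one
 * heap tuple.  The slot, estate and opened relations are assumed to be set
 * up by the caller; a confirmed conflict raises the usual exclusion-
 * violation error.
 */
static void
sketch_recheck_exclusion(Relation heapRel, Relation indexRel,
						 IndexInfo *indexInfo, HeapTuple tuple,
						 TupleTableSlot *slot, EState *estate)
{
	ExprContext *econtext = GetPerTupleExprContext(estate);
	Datum		values[INDEX_MAX_KEYS];
	bool		isnull[INDEX_MAX_KEYS];

	/* Recompute the index column values for this heap tuple. */
	ExecStoreTuple(tuple, slot, InvalidBuffer, false);
	econtext->ecxt_scantuple = slot;	/* needed for expression indexes */
	FormIndexDatum(indexInfo, slot, estate, values, isnull);

	/* Scan the index for conflicting rows, waiting out in-progress xacts. */
	check_exclusion_constraint(heapRel, indexRel, indexInfo,
							   &tuple->t_self, values, isnull,
							   estate, false);
}
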
875 : /*
876 : * Check existing tuple's index values to see if it really matches the
877 : * exclusion condition against the new_values. Returns true if conflict.
878 : */
879 : static bool
880 12 : index_recheck_constraint(Relation index, Oid *constr_procs,
881 : Datum *existing_values, bool *existing_isnull,
882 : Datum *new_values)
883 : {
884 12 : int index_natts = index->rd_index->indnatts;
885 : int i;
886 :
887 24 : for (i = 0; i < index_natts; i++)
888 : {
889 : /* Assume the exclusion operators are strict */
890 21 : if (existing_isnull[i])
891 0 : return false;
892 :
893 21 : if (!DatumGetBool(OidFunctionCall2Coll(constr_procs[i],
894 : index->rd_indcollation[i],
895 : existing_values[i],
896 : new_values[i])))
897 9 : return false;
898 : }
899 :
900 3 : return true;
901 : }
|