Age Owner TLA Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * MERGE runs a join between the source relation and the target
28 : * table; if any WHEN NOT MATCHED clauses are present, then the
29 : * join is an outer join. In this case, any unmatched tuples will
30 : * have NULL row-locating info, and only INSERT can be run. But for
31 : * matched tuples, then row-locating info is used to determine the
32 : * tuple to UPDATE or DELETE. When all clauses are WHEN MATCHED,
33 : * then an inner join is used, so all tuples contain row-locating info.
34 : *
35 : * If the query specifies RETURNING, then the ModifyTable returns a
36 : * RETURNING tuple after completing each row insert, update, or delete.
37 : * It must be called again to continue the operation. Without RETURNING,
38 : * we just loop within the node until all the work is done, then
39 : * return NULL. This avoids useless call/return overhead. (MERGE does
40 : * not support RETURNING.)
41 : */
42 :
43 : #include "postgres.h"
44 :
45 : #include "access/heapam.h"
46 : #include "access/htup_details.h"
47 : #include "access/tableam.h"
48 : #include "access/xact.h"
49 : #include "catalog/catalog.h"
50 : #include "commands/trigger.h"
51 : #include "executor/execPartition.h"
52 : #include "executor/executor.h"
53 : #include "executor/nodeModifyTable.h"
54 : #include "foreign/fdwapi.h"
55 : #include "miscadmin.h"
56 : #include "nodes/nodeFuncs.h"
57 : #include "optimizer/optimizer.h"
58 : #include "rewrite/rewriteHandler.h"
59 : #include "storage/bufmgr.h"
60 : #include "storage/lmgr.h"
61 : #include "utils/builtins.h"
62 : #include "utils/datum.h"
63 : #include "utils/memutils.h"
64 : #include "utils/rel.h"
65 :
66 :
67 : typedef struct MTTargetRelLookup
68 : {
69 : Oid relationOid; /* hash key, must be first */
70 : int relationIndex; /* rel's index in resultRelInfo[] array */
71 : } MTTargetRelLookup;
72 :
73 : /*
74 : * Context struct for a ModifyTable operation, containing basic execution
75 : * state and some output variables populated by ExecUpdateAct() and
76 : * ExecDeleteAct() to report the result of their actions to callers.
77 : */
78 : typedef struct ModifyTableContext
79 : {
80 : /* Operation state */
81 : ModifyTableState *mtstate;
82 : EPQState *epqstate;
83 : EState *estate;
84 :
85 : /*
86 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
87 : * access "junk" columns that are not going to be stored.
88 : */
89 : TupleTableSlot *planSlot;
90 :
91 : /* MERGE specific */
92 : MergeActionState *relaction; /* MERGE action in progress */
93 :
94 : /*
95 : * Information about the changes that were made concurrently to a tuple
96 : * being updated or deleted
97 : */
98 : TM_FailureData tmfd;
99 :
100 : /*
101 : * The tuple projected by the INSERT's RETURNING clause, when doing a
102 : * cross-partition UPDATE
103 : */
104 : TupleTableSlot *cpUpdateReturningSlot;
105 : } ModifyTableContext;
106 :
107 : /*
108 : * Context struct containing output data specific to UPDATE operations.
109 : */
110 : typedef struct UpdateContext
111 : {
112 : bool updated; /* did UPDATE actually occur? */
113 : bool crossPartUpdate; /* was it a cross-partition update? */
114 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
115 :
116 : /*
117 : * Lock mode to acquire on the latest tuple version before performing
118 : * EvalPlanQual on it
119 : */
120 : LockTupleMode lockmode;
121 : } UpdateContext;
122 :
123 :
124 : static void ExecBatchInsert(ModifyTableState *mtstate,
125 : ResultRelInfo *resultRelInfo,
126 : TupleTableSlot **slots,
127 : TupleTableSlot **planSlots,
128 : int numSlots,
129 : EState *estate,
130 : bool canSetTag);
131 : static void ExecPendingInserts(EState *estate);
132 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
133 : ResultRelInfo *sourcePartInfo,
134 : ResultRelInfo *destPartInfo,
135 : ItemPointer tupleid,
136 : TupleTableSlot *oldslot,
137 : TupleTableSlot *newslot);
138 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
139 : ResultRelInfo *resultRelInfo,
140 : ItemPointer conflictTid,
141 : TupleTableSlot *excludedSlot,
142 : bool canSetTag,
143 : TupleTableSlot **returning);
144 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
145 : EState *estate,
146 : PartitionTupleRouting *proute,
147 : ResultRelInfo *targetRelInfo,
148 : TupleTableSlot *slot,
149 : ResultRelInfo **partRelInfo);
150 :
151 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
152 : ResultRelInfo *resultRelInfo,
153 : ItemPointer tupleid,
154 : bool canSetTag);
155 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
156 : static bool ExecMergeMatched(ModifyTableContext *context,
157 : ResultRelInfo *resultRelInfo,
158 : ItemPointer tupleid,
159 : bool canSetTag);
160 : static void ExecMergeNotMatched(ModifyTableContext *context,
161 : ResultRelInfo *resultRelInfo,
162 : bool canSetTag);
163 :
164 :
165 : /*
166 : * Verify that the tuples to be produced by INSERT match the
167 : * target relation's rowtype
168 : *
169 : * We do this to guard against stale plans. If plan invalidation is
170 : * functioning properly then we should never get a failure here, but better
171 : * safe than sorry. Note that this is called after we have obtained lock
172 : * on the target rel, so the rowtype can't change underneath us.
173 : *
174 : * The plan output is represented by its targetlist, because that makes
175 : * handling the dropped-column case easier.
176 : *
177 : * We used to use this for UPDATE as well, but now the equivalent checks
178 : * are done in ExecBuildUpdateProjection.
179 : */
180 : static void
4929 tgl 181 CBC 49906 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
182 : {
183 49906 : TupleDesc resultDesc = RelationGetDescr(resultRel);
184 49906 : int attno = 0;
185 : ListCell *lc;
186 :
187 196640 : foreach(lc, targetList)
188 : {
189 146734 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
190 : Form_pg_attribute attr;
191 :
739 192 146734 : Assert(!tle->resjunk); /* caller removed junk items already */
193 :
4929 194 146734 : if (attno >= resultDesc->natts)
4929 tgl 195 UBC 0 : ereport(ERROR,
196 : (errcode(ERRCODE_DATATYPE_MISMATCH),
197 : errmsg("table row type and query-specified row type do not match"),
198 : errdetail("Query has too many columns.")));
2058 andres 199 CBC 146734 : attr = TupleDescAttr(resultDesc, attno);
200 146734 : attno++;
201 :
4929 tgl 202 146734 : if (!attr->attisdropped)
203 : {
204 : /* Normal case: demand type match */
205 146429 : if (exprType((Node *) tle->expr) != attr->atttypid)
4929 tgl 206 UBC 0 : ereport(ERROR,
207 : (errcode(ERRCODE_DATATYPE_MISMATCH),
208 : errmsg("table row type and query-specified row type do not match"),
209 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
210 : format_type_be(attr->atttypid),
211 : attno,
212 : format_type_be(exprType((Node *) tle->expr)))));
213 : }
214 : else
215 : {
216 : /*
217 : * For a dropped column, we can't check atttypid (it's likely 0).
218 : * In any case the planner has most likely inserted an INT4 null.
219 : * What we insist on is just *some* NULL constant.
220 : */
4929 tgl 221 CBC 305 : if (!IsA(tle->expr, Const) ||
222 305 : !((Const *) tle->expr)->constisnull)
4929 tgl 223 UBC 0 : ereport(ERROR,
224 : (errcode(ERRCODE_DATATYPE_MISMATCH),
225 : errmsg("table row type and query-specified row type do not match"),
226 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
227 : attno)));
228 : }
229 : }
4929 tgl 230 CBC 49906 : if (attno != resultDesc->natts)
4929 tgl 231 UBC 0 : ereport(ERROR,
232 : (errcode(ERRCODE_DATATYPE_MISMATCH),
233 : errmsg("table row type and query-specified row type do not match"),
234 : errdetail("Query has too few columns.")));
4929 tgl 235 CBC 49906 : }
236 :
237 : /*
238 : * ExecProcessReturning --- evaluate a RETURNING list
239 : *
240 : * resultRelInfo: current result rel
241 : * tupleSlot: slot holding tuple actually inserted/updated/deleted
242 : * planSlot: slot holding tuple returned by top subplan node
243 : *
244 : * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
245 : * scan tuple.
246 : *
247 : * Returns a slot holding the result tuple
248 : */
249 : static TupleTableSlot *
2578 rhaas 250 3506 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
251 : TupleTableSlot *tupleSlot,
252 : TupleTableSlot *planSlot)
253 : {
254 3506 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
4929 tgl 255 3506 : ExprContext *econtext = projectReturning->pi_exprContext;
256 :
257 : /* Make tuple and any needed join variables available to ExecProject */
2578 rhaas 258 3506 : if (tupleSlot)
259 3159 : econtext->ecxt_scantuple = tupleSlot;
4929 tgl 260 3506 : econtext->ecxt_outertuple = planSlot;
261 :
262 : /*
263 : * RETURNING expressions might reference the tableoid column, so
264 : * reinitialize tts_tableOid before evaluating them.
265 : */
1503 andres 266 3506 : econtext->ecxt_scantuple->tts_tableOid =
267 3506 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
268 :
269 : /* Compute the RETURNING expressions */
2271 270 3506 : return ExecProject(projectReturning);
271 : }
272 :
273 : /*
274 : * ExecCheckTupleVisible -- verify tuple is visible
275 : *
276 : * It would not be consistent with guarantees of the higher isolation levels to
277 : * proceed with avoiding insertion (taking speculative insertion's alternative
278 : * path) on the basis of another tuple that is not visible to MVCC snapshot.
279 : * Check for the need to raise a serialization failure, and do so as necessary.
280 : */
281 : static void
1478 282 2620 : ExecCheckTupleVisible(EState *estate,
283 : Relation rel,
284 : TupleTableSlot *slot)
285 : {
2893 286 2620 : if (!IsolationUsesXactSnapshot())
287 2588 : return;
288 :
1478 289 32 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
290 : {
291 : Datum xminDatum;
292 : TransactionId xmin;
293 : bool isnull;
294 :
295 20 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
296 20 : Assert(!isnull);
297 20 : xmin = DatumGetTransactionId(xminDatum);
298 :
299 : /*
300 : * We should not raise a serialization failure if the conflict is
301 : * against a tuple inserted by our own transaction, even if it's not
302 : * visible to our snapshot. (This would happen, for example, if
303 : * conflicting keys are proposed for insertion in a single command.)
304 : */
305 20 : if (!TransactionIdIsCurrentTransactionId(xmin))
2359 tgl 306 10 : ereport(ERROR,
307 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
308 : errmsg("could not serialize access due to concurrent update")));
309 : }
310 : }
311 :
312 : /*
313 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
314 : */
315 : static void
2893 andres 316 79 : ExecCheckTIDVisible(EState *estate,
317 : ResultRelInfo *relinfo,
318 : ItemPointer tid,
319 : TupleTableSlot *tempSlot)
320 : {
321 79 : Relation rel = relinfo->ri_RelationDesc;
322 :
323 : /* Redundantly check isolation level */
324 79 : if (!IsolationUsesXactSnapshot())
325 47 : return;
326 :
1417 327 32 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
2893 andres 328 UBC 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
1478 andres 329 CBC 32 : ExecCheckTupleVisible(estate, rel, tempSlot);
330 22 : ExecClearTuple(tempSlot);
331 : }
332 :
333 : /*
334 : * Initialize to compute stored generated columns for a tuple
335 : *
336 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
337 : * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
338 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
339 : *
340 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
341 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
342 : * cross-partition UPDATEs, since a partition might be the target of both
343 : * UPDATE and INSERT actions.
344 : */
345 : void
94 tgl 346 GIC 29655 : ExecInitStoredGenerated(ResultRelInfo *resultRelInfo,
347 : EState *estate,
348 : CmdType cmdtype)
1471 peter 349 ECB : {
1471 peter 350 GIC 29655 : Relation rel = resultRelInfo->ri_RelationDesc;
351 29655 : TupleDesc tupdesc = RelationGetDescr(rel);
352 29655 : int natts = tupdesc->natts;
353 : ExprState **ri_GeneratedExprs;
354 : int ri_NumGeneratedNeeded;
94 tgl 355 ECB : Bitmapset *updatedCols;
1471 peter 356 : MemoryContext oldContext;
357 :
358 : /* Nothing to do if no generated columns */
94 tgl 359 GIC 29655 : if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
94 tgl 360 CBC 29265 : return;
1471 peter 361 ECB :
362 : /*
363 : * In an UPDATE, we can skip computing any generated columns that do not
364 : * depend on any UPDATE target column. But if there is a BEFORE ROW
365 : * UPDATE trigger, we cannot skip because the trigger might change more
366 : * columns.
367 : */
94 tgl 368 GIC 390 : if (cmdtype == CMD_UPDATE &&
94 tgl 369 CBC 105 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
370 93 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
94 tgl 371 ECB : else
94 tgl 372 GIC 297 : updatedCols = NULL;
1471 peter 373 ECB :
374 : /*
375 : * Make sure these data structures are built in the per-query memory
376 : * context so they'll survive throughout the query.
377 : */
94 tgl 378 GIC 390 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1471 peter 379 ECB :
34 tgl 380 GNC 390 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
381 390 : ri_NumGeneratedNeeded = 0;
382 :
94 tgl 383 GIC 1435 : for (int i = 0; i < natts; i++)
94 tgl 384 ECB : {
94 tgl 385 CBC 1045 : if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
1471 peter 386 EUB : {
387 : Expr *expr;
388 :
389 : /* Fetch the GENERATED AS expression tree */
94 tgl 390 GIC 400 : expr = (Expr *) build_column_default(rel, i + 1);
391 400 : if (expr == NULL)
94 tgl 392 UIC 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
94 tgl 393 ECB : i + 1, RelationGetRelationName(rel));
394 :
395 : /*
396 : * If it's an update with a known set of update target columns,
397 : * see if we can skip the computation.
398 : */
94 tgl 399 CBC 400 : if (updatedCols)
94 tgl 400 ECB : {
94 tgl 401 GIC 96 : Bitmapset *attrs_used = NULL;
402 :
403 96 : pull_varattnos((Node *) expr, 1, &attrs_used);
1471 peter 404 ECB :
94 tgl 405 CBC 96 : if (!bms_overlap(updatedCols, attrs_used))
94 tgl 406 GIC 10 : continue; /* need not update this column */
407 : }
1471 peter 408 ECB :
94 tgl 409 : /* No luck, so prepare the expression for execution */
34 tgl 410 GNC 390 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
411 390 : ri_NumGeneratedNeeded++;
412 :
413 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
414 390 : if (cmdtype == CMD_UPDATE)
415 98 : resultRelInfo->ri_extraUpdatedCols =
416 98 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
417 : i + 1 - FirstLowInvalidHeapAttributeNumber);
418 : }
419 : }
1471 peter 420 ECB :
421 : /* Save in appropriate set of fields */
34 tgl 422 GNC 390 : if (cmdtype == CMD_UPDATE)
423 : {
424 : /* Don't call twice */
425 105 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
426 :
427 105 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
428 105 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
429 : }
430 : else
431 : {
432 : /* Don't call twice */
433 285 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
434 :
435 285 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
436 285 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
437 : }
438 :
94 tgl 439 GIC 390 : MemoryContextSwitchTo(oldContext);
94 tgl 440 ECB : }
441 :
442 : /*
443 : * Compute stored generated columns for a tuple
444 : */
445 : void
94 tgl 446 CBC 532 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
447 : EState *estate, TupleTableSlot *slot,
94 tgl 448 ECB : CmdType cmdtype)
449 : {
94 tgl 450 GIC 532 : Relation rel = resultRelInfo->ri_RelationDesc;
451 532 : TupleDesc tupdesc = RelationGetDescr(rel);
94 tgl 452 CBC 532 : int natts = tupdesc->natts;
94 tgl 453 GIC 532 : ExprContext *econtext = GetPerTupleExprContext(estate);
454 : ExprState **ri_GeneratedExprs;
455 : MemoryContext oldContext;
456 : Datum *values;
457 : bool *nulls;
458 :
459 : /* We should not be called unless this is true */
94 tgl 460 CBC 532 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
461 :
462 : /*
463 : * Initialize the expressions if we didn't already, and check whether we
464 : * can exit early because nothing needs to be computed.
1147 peter 465 ECB : */
34 tgl 466 GNC 532 : if (cmdtype == CMD_UPDATE)
467 : {
468 122 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
469 93 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
470 122 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
471 7 : return;
472 115 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
473 : }
474 : else
475 : {
476 410 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
477 285 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
478 : /* Early exit is impossible given the prior Assert */
479 410 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
480 410 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
481 : }
482 :
1471 peter 483 GIC 525 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
484 :
1471 peter 485 CBC 525 : values = palloc(sizeof(*values) * natts);
1471 peter 486 GIC 525 : nulls = palloc(sizeof(*nulls) * natts);
1425 peter 487 ECB :
1425 peter 488 CBC 525 : slot_getallattrs(slot);
489 525 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
1471 peter 490 ECB :
1471 peter 491 CBC 1898 : for (int i = 0; i < natts; i++)
492 : {
1425 peter 493 GIC 1379 : Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
494 :
34 tgl 495 GNC 1379 : if (ri_GeneratedExprs[i])
1471 peter 496 ECB : {
497 : Datum val;
498 : bool isnull;
499 :
94 tgl 500 GIC 532 : Assert(attr->attgenerated == ATTRIBUTE_GENERATED_STORED);
501 :
1471 peter 502 CBC 532 : econtext->ecxt_scantuple = slot;
503 :
34 tgl 504 GNC 532 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
1471 peter 505 ECB :
506 : /*
1086 drowley 507 : * We must make a copy of val as we have no guarantees about where
508 : * memory for a pass-by-reference Datum is located.
509 : */
1086 drowley 510 CBC 526 : if (!isnull)
1086 drowley 511 GIC 505 : val = datumCopy(val, attr->attbyval, attr->attlen);
1086 drowley 512 ECB :
1471 peter 513 GIC 526 : values[i] = val;
1471 peter 514 CBC 526 : nulls[i] = isnull;
515 : }
516 : else
517 : {
1425 peter 518 GIC 847 : if (!nulls[i])
1425 peter 519 CBC 829 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
520 : }
1471 peter 521 ECB : }
522 :
1425 peter 523 CBC 519 : ExecClearTuple(slot);
1425 peter 524 GIC 519 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
525 519 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
526 519 : ExecStoreVirtualTuple(slot);
527 519 : ExecMaterializeSlot(slot);
528 :
1471 peter 529 CBC 519 : MemoryContextSwitchTo(oldContext);
1471 peter 530 ECB : }
531 :
733 tgl 532 : /*
533 : * ExecInitInsertProjection
534 : * Do one-time initialization of projection data for INSERT tuples.
535 : *
536 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
537 : *
538 : * This is also a convenient place to verify that the
539 : * output of an INSERT matches the target table.
540 : */
541 : static void
733 tgl 542 CBC 49593 : ExecInitInsertProjection(ModifyTableState *mtstate,
733 tgl 543 ECB : ResultRelInfo *resultRelInfo)
544 : {
733 tgl 545 CBC 49593 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
546 49593 : Plan *subplan = outerPlan(node);
733 tgl 547 GIC 49593 : EState *estate = mtstate->ps.state;
733 tgl 548 CBC 49593 : List *insertTargetList = NIL;
733 tgl 549 GIC 49593 : bool need_projection = false;
550 : ListCell *l;
551 :
552 : /* Extract non-junk columns of the subplan's result tlist. */
553 195520 : foreach(l, subplan->targetlist)
554 : {
555 145927 : TargetEntry *tle = (TargetEntry *) lfirst(l);
556 :
557 145927 : if (!tle->resjunk)
558 145927 : insertTargetList = lappend(insertTargetList, tle);
559 : else
733 tgl 560 UIC 0 : need_projection = true;
733 tgl 561 ECB : }
562 :
563 : /*
564 : * The junk-free list must produce a tuple suitable for the result
565 : * relation.
566 : */
733 tgl 567 CBC 49593 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
733 tgl 568 ECB :
569 : /* We'll need a slot matching the table's format. */
733 tgl 570 GIC 49593 : resultRelInfo->ri_newTupleSlot =
571 49593 : table_slot_create(resultRelInfo->ri_RelationDesc,
733 tgl 572 ECB : &estate->es_tupleTable);
573 :
574 : /* Build ProjectionInfo if needed (it probably isn't). */
733 tgl 575 GIC 49593 : if (need_projection)
733 tgl 576 ECB : {
733 tgl 577 LBC 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
578 :
733 tgl 579 EUB : /* need an expression context to do the projection */
733 tgl 580 UIC 0 : if (mtstate->ps.ps_ExprContext == NULL)
581 0 : ExecAssignExprContext(estate, &mtstate->ps);
582 :
583 0 : resultRelInfo->ri_projectNew =
584 0 : ExecBuildProjectionInfo(insertTargetList,
585 : mtstate->ps.ps_ExprContext,
733 tgl 586 ECB : resultRelInfo->ri_newTupleSlot,
587 : &mtstate->ps,
588 : relDesc);
589 : }
590 :
733 tgl 591 GIC 49593 : resultRelInfo->ri_projectNewInfoValid = true;
592 49593 : }
593 :
733 tgl 594 ECB : /*
595 : * ExecInitUpdateProjection
733 tgl 596 EUB : * Do one-time initialization of projection data for UPDATE tuples.
597 : *
598 : * UPDATE always needs a projection, because (1) there's always some junk
599 : * attrs, and (2) we may need to merge values of not-updated columns from
600 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
601 : * the subplan contains only new values for the changed columns, plus row
602 : * identity info in the junk attrs.
603 : *
604 : * This is "one-time" for any given result rel, but we might touch more than
605 : * one result rel in the course of an inherited UPDATE, and each one needs
606 : * its own projection due to possible column order variation.
607 : *
608 : * This is also a convenient place to verify that the output of an UPDATE
609 : * matches the target table (ExecBuildUpdateProjection does that).
733 tgl 610 ECB : */
611 : static void
733 tgl 612 GIC 7288 : ExecInitUpdateProjection(ModifyTableState *mtstate,
613 : ResultRelInfo *resultRelInfo)
614 : {
615 7288 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
616 7288 : Plan *subplan = outerPlan(node);
617 7288 : EState *estate = mtstate->ps.state;
618 7288 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
619 : int whichrel;
620 : List *updateColnos;
621 :
622 : /*
623 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
624 : * to, we can get the index the hard way with an integer division.
625 : */
626 7288 : whichrel = mtstate->mt_lastResultIndex;
627 7288 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
628 : {
733 tgl 629 UIC 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
630 0 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
733 tgl 631 ECB : }
632 :
733 tgl 633 GIC 7288 : updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);
733 tgl 634 ECB :
635 : /*
636 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
637 : * produced by the subplan to get the new tuple. We need two slots, both
638 : * matching the table's desired format.
639 : */
733 tgl 640 GIC 7288 : resultRelInfo->ri_oldTupleSlot =
641 7288 : table_slot_create(resultRelInfo->ri_RelationDesc,
642 : &estate->es_tupleTable);
643 7288 : resultRelInfo->ri_newTupleSlot =
644 7288 : table_slot_create(resultRelInfo->ri_RelationDesc,
733 tgl 645 ECB : &estate->es_tupleTable);
646 :
647 : /* need an expression context to do the projection */
733 tgl 648 GBC 7288 : if (mtstate->ps.ps_ExprContext == NULL)
649 6715 : ExecAssignExprContext(estate, &mtstate->ps);
650 :
733 tgl 651 GIC 7288 : resultRelInfo->ri_projectNew =
733 tgl 652 CBC 7288 : ExecBuildUpdateProjection(subplan->targetlist,
653 : false, /* subplan did the evaluation */
654 : updateColnos,
655 : relDesc,
656 : mtstate->ps.ps_ExprContext,
657 : resultRelInfo->ri_newTupleSlot,
658 : &mtstate->ps);
733 tgl 659 ECB :
733 tgl 660 CBC 7288 : resultRelInfo->ri_projectNewInfoValid = true;
733 tgl 661 GIC 7288 : }
733 tgl 662 ECB :
739 663 : /*
664 : * ExecGetInsertNewTuple
665 : * This prepares a "new" tuple ready to be inserted into given result
666 : * relation, by removing any junk columns of the plan's output tuple
667 : * and (if necessary) coercing the tuple to the right tuple format.
668 : */
669 : static TupleTableSlot *
739 tgl 670 CBC 6032084 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
739 tgl 671 ECB : TupleTableSlot *planSlot)
672 : {
739 tgl 673 GIC 6032084 : ProjectionInfo *newProj = relinfo->ri_projectNew;
674 : ExprContext *econtext;
675 :
676 : /*
677 : * If there's no projection to be done, just make sure the slot is of the
678 : * right type for the target rel. If the planSlot is the right type we
739 tgl 679 ECB : * can use it as-is, else copy the data into ri_newTupleSlot.
680 : */
739 tgl 681 GIC 6032084 : if (newProj == NULL)
682 : {
683 6032084 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
684 : {
685 5648180 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
686 5648180 : return relinfo->ri_newTupleSlot;
687 : }
688 : else
739 tgl 689 CBC 383904 : return planSlot;
690 : }
691 :
739 tgl 692 ECB : /*
693 : * Else project; since the projection output slot is ri_newTupleSlot, this
694 : * will also fix any slot-type problem.
695 : *
696 : * Note: currently, this is dead code, because INSERT cases don't receive
697 : * any junk columns so there's never a projection to be done.
698 : */
739 tgl 699 UIC 0 : econtext = newProj->pi_exprContext;
739 tgl 700 LBC 0 : econtext->ecxt_outertuple = planSlot;
739 tgl 701 UIC 0 : return ExecProject(newProj);
739 tgl 702 ECB : }
703 :
704 : /*
705 : * ExecGetUpdateNewTuple
706 : * This prepares a "new" tuple by combining an UPDATE subplan's output
707 : * tuple (which contains values of changed columns) with unchanged
708 : * columns taken from the old tuple.
709 : *
710 : * The subplan tuple might also contain junk columns, which are ignored.
711 : * Note that the projection also ensures we have a slot of the right type.
712 : */
713 : TupleTableSlot *
739 tgl 714 GIC 183315 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
715 : TupleTableSlot *planSlot,
716 : TupleTableSlot *oldSlot)
717 : {
27 dean.a.rasheed 718 GBC 183315 : ProjectionInfo *newProj = relinfo->ri_projectNew;
27 dean.a.rasheed 719 EUB : ExprContext *econtext;
720 :
721 : /* Use a few extra Asserts to protect against outside callers */
733 tgl 722 GIC 183315 : Assert(relinfo->ri_projectNewInfoValid);
739 723 183315 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
724 183315 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
725 :
726 183315 : econtext = newProj->pi_exprContext;
727 183315 : econtext->ecxt_outertuple = planSlot;
728 183315 : econtext->ecxt_scantuple = oldSlot;
729 183315 : return ExecProject(newProj);
730 : }
731 :
732 : /* ----------------------------------------------------------------
4929 tgl 733 ECB : * ExecInsert
734 : *
735 : * For INSERT, we have to insert the tuple into the target relation
736 : * (or partition thereof) and insert appropriate tuples into the index
907 heikki.linnakangas 737 : * relations.
738 : *
739 : * slot contains the new tuple value to be stored.
740 : *
4929 tgl 741 : * Returns RETURNING result if any, otherwise NULL.
385 alvherre 742 : * *inserted_tuple is the tuple that's effectively inserted;
743 : * *inserted_destrel is the relation where it was inserted.
744 : * These are only set on success.
907 heikki.linnakangas 745 : *
746 : * This may change the currently active tuple conversion map in
747 : * mtstate->mt_transition_capture, so the callers must take care to
748 : * save the previous value to avoid losing track of it.
749 : * ----------------------------------------------------------------
750 : */
751 : static TupleTableSlot *
388 alvherre 752 GIC 6033242 : ExecInsert(ModifyTableContext *context,
753 : ResultRelInfo *resultRelInfo,
754 : TupleTableSlot *slot,
755 : bool canSetTag,
756 : TupleTableSlot **inserted_tuple,
757 : ResultRelInfo **insert_destrel)
758 : {
759 6033242 : ModifyTableState *mtstate = context->mtstate;
760 6033242 : EState *estate = context->estate;
761 : Relation resultRelationDesc;
4929 tgl 762 6033242 : List *recheckIndexes = NIL;
388 alvherre 763 6033242 : TupleTableSlot *planSlot = context->planSlot;
2190 rhaas 764 6033242 : TupleTableSlot *result = NULL;
765 : TransitionCaptureState *ar_insert_trig_tcs;
1847 alvherre 766 6033242 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
767 6033242 : OnConflictAction onconflict = node->onConflictAction;
907 heikki.linnakangas 768 6033242 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
769 : MemoryContext oldContext;
770 :
4929 tgl 771 ECB : /*
772 : * If the input result relation is a partitioned table, find the leaf
773 : * partition to insert the tuple into.
774 : */
907 heikki.linnakangas 775 GIC 6033242 : if (proute)
776 : {
777 : ResultRelInfo *partRelInfo;
907 heikki.linnakangas 778 ECB :
907 heikki.linnakangas 779 CBC 360403 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
780 : resultRelInfo, slot,
907 heikki.linnakangas 781 ECB : &partRelInfo);
907 heikki.linnakangas 782 CBC 360304 : resultRelInfo = partRelInfo;
907 heikki.linnakangas 783 ECB : }
784 :
907 heikki.linnakangas 785 CBC 6033143 : ExecMaterializeSlot(slot);
907 heikki.linnakangas 786 ECB :
4929 tgl 787 CBC 6033143 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
788 :
789 : /*
790 : * Open the table's indexes, if we have not done so already, so that we
791 : * can add new index entries for the inserted tuple.
792 : */
733 tgl 793 GIC 6033143 : if (resultRelationDesc->rd_rel->relhasindex &&
733 tgl 794 CBC 1625101 : resultRelInfo->ri_IndexRelationDescs == NULL)
733 tgl 795 GIC 15209 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
796 :
797 : /*
2893 andres 798 ECB : * BEFORE ROW INSERT Triggers.
799 : *
800 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
801 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
802 : * violations before firing these triggers, because they can change the
803 : * values to insert. Also, they can run arbitrary user-defined code with
804 : * side-effects that we can't cancel by just not inserting the tuple.
805 : */
4929 tgl 806 CBC 6033143 : if (resultRelInfo->ri_TrigDesc &&
4564 tgl 807 GIC 37189 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
808 : {
809 : /* Flush any pending inserts, so rows are visible to the triggers */
135 efujita 810 1016 : if (estate->es_insert_pending_result_relations != NIL)
811 3 : ExecPendingInserts(estate);
135 efujita 812 ECB :
1503 andres 813 CBC 1016 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
814 67 : return NULL; /* "do nothing" */
815 : }
816 :
817 : /* INSTEAD OF ROW INSERT Triggers */
4564 tgl 818 GIC 6033018 : if (resultRelInfo->ri_TrigDesc &&
819 37064 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
820 : {
1503 andres 821 69 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
822 3 : return NULL; /* "do nothing" */
823 : }
3682 tgl 824 6032949 : else if (resultRelInfo->ri_FdwRoutine)
3682 tgl 825 ECB : {
688 826 : /*
827 : * GENERATED expressions might reference the tableoid column, so
828 : * (re-)initialize tts_tableOid before evaluating them.
829 : */
688 tgl 830 CBC 70983 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
831 :
1471 peter 832 ECB : /*
833 : * Compute stored generated columns
834 : */
1471 peter 835 GIC 70983 : if (resultRelationDesc->rd_att->constr &&
836 183 : resultRelationDesc->rd_att->constr->has_generated_stored)
907 heikki.linnakangas 837 CBC 4 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
907 heikki.linnakangas 838 ECB : CMD_INSERT);
839 :
809 tomas.vondra 840 : /*
841 : * If the FDW supports batching, and batching is requested, accumulate
842 : * rows and insert them in batches. Otherwise use the per-row inserts.
843 : */
809 tomas.vondra 844 GIC 70983 : if (resultRelInfo->ri_BatchSize > 1)
845 : {
135 efujita 846 70120 : bool flushed = false;
847 :
848 : /*
192 efujita 849 ECB : * When we've reached the desired batch size, perform the
850 : * insertion.
851 : */
809 tomas.vondra 852 GIC 70120 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
853 : {
809 tomas.vondra 854 CBC 9 : ExecBatchInsert(mtstate, resultRelInfo,
697 tgl 855 ECB : resultRelInfo->ri_Slots,
856 : resultRelInfo->ri_PlanSlots,
857 : resultRelInfo->ri_NumSlots,
858 : estate, canSetTag);
809 tomas.vondra 859 GIC 9 : resultRelInfo->ri_NumSlots = 0;
135 efujita 860 9 : flushed = true;
861 : }
862 :
809 tomas.vondra 863 CBC 70120 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
864 :
865 70120 : if (resultRelInfo->ri_Slots == NULL)
866 : {
809 tomas.vondra 867 GIC 28 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
697 tgl 868 14 : resultRelInfo->ri_BatchSize);
809 tomas.vondra 869 14 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
697 tgl 870 14 : resultRelInfo->ri_BatchSize);
809 tomas.vondra 871 ECB : }
872 :
667 873 : /*
874 : * Initialize the batch slots. We don't know how many slots will
875 : * be needed, so we initialize them as the batch grows, and we
876 : * keep them across batches. To mitigate an inefficiency in how
877 : * resource owner handles objects with many references (as with
650 andrew 878 : * many slots all referencing the same tuple descriptor) we copy
605 tomas.vondra 879 : * the appropriate tuple descriptor for each slot.
880 : */
667 tomas.vondra 881 GIC 70120 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
667 tomas.vondra 882 ECB : {
650 andrew 883 GIC 65596 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
605 tomas.vondra 884 ECB : TupleDesc plan_tdesc =
332 tgl 885 GIC 65596 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
667 tomas.vondra 886 ECB :
667 tomas.vondra 887 CBC 131192 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
888 65596 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
667 tomas.vondra 889 ECB :
667 tomas.vondra 890 GIC 131192 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
605 891 65596 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
892 :
893 : /* remember how many batch slots we initialized */
667 894 65596 : resultRelInfo->ri_NumSlotsInitialized++;
895 : }
896 :
662 897 70120 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
898 : slot);
899 :
662 tomas.vondra 900 CBC 70120 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
901 : planSlot);
662 tomas.vondra 902 ECB :
903 : /*
135 efujita 904 : * If these are the first tuples stored in the buffers, add the
905 : * target rel and the mtstate to the
122 906 : * es_insert_pending_result_relations and
 907 : * es_insert_pending_modifytables lists respectively, except in
908 : * the case where flushing was done above, in which case they
909 : * would already have been added to the lists, so no need to do
910 : * this.
911 : */
135 efujita 912 GIC 70120 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
135 efujita 913 ECB : {
135 efujita 914 GIC 18 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
915 : resultRelInfo));
135 efujita 916 CBC 18 : estate->es_insert_pending_result_relations =
135 efujita 917 GIC 18 : lappend(estate->es_insert_pending_result_relations,
918 : resultRelInfo);
122 efujita 919 CBC 18 : estate->es_insert_pending_modifytables =
122 efujita 920 GIC 18 : lappend(estate->es_insert_pending_modifytables, mtstate);
921 : }
135 922 70120 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
923 : resultRelInfo));
924 :
809 tomas.vondra 925 70120 : resultRelInfo->ri_NumSlots++;
926 :
927 70120 : MemoryContextSwitchTo(oldContext);
928 :
929 70120 : return NULL;
930 : }
809 tomas.vondra 931 ECB :
932 : /*
3682 tgl 933 : * insert into foreign table: let the FDW do it
934 : */
3682 tgl 935 CBC 863 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
3682 tgl 936 ECB : resultRelInfo,
937 : slot,
938 : planSlot);
939 :
3682 tgl 940 GIC 860 : if (slot == NULL) /* "do nothing" */
3682 tgl 941 CBC 2 : return NULL;
942 :
943 : /*
2621 rhaas 944 ECB : * AFTER ROW Triggers or RETURNING expressions might reference the
945 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
688 tgl 946 : * them. (This covers the case where the FDW replaced the slot.)
947 : */
1503 andres 948 CBC 858 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
949 : }
950 : else
951 : {
952 : WCOKind wco_kind;
953 :
3485 rhaas 954 ECB : /*
955 : * Constraints and GENERATED expressions might reference the tableoid
956 : * column, so (re-)initialize tts_tableOid before evaluating them.
957 : */
1503 andres 958 GIC 5961966 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
3485 rhaas 959 ECB :
1471 peter 960 : /*
961 : * Compute stored generated columns
962 : */
1471 peter 963 GIC 5961966 : if (resultRelationDesc->rd_att->constr &&
964 1773040 : resultRelationDesc->rd_att->constr->has_generated_stored)
907 heikki.linnakangas 965 388 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
966 : CMD_INSERT);
1471 peter 967 ECB :
968 : /*
969 : * Check any RLS WITH CHECK policies.
970 : *
971 : * Normally we should check INSERT policies. But if the insert is the
972 : * result of a partition key update that moved the tuple to a new
973 : * partition, we should instead check UPDATE policies, because we are
974 : * executing policies defined on the target table, and not those
975 : * defined on the child partitions.
976 : *
377 alvherre 977 : * If we're running MERGE, we refer to the action that we're executing
978 : * to know if we're doing an INSERT or UPDATE to a partition table.
979 : */
377 alvherre 980 GIC 5961960 : if (mtstate->operation == CMD_UPDATE)
981 340 : wco_kind = WCO_RLS_UPDATE_CHECK;
377 alvherre 982 CBC 5961620 : else if (mtstate->operation == CMD_MERGE)
983 749 : wco_kind = (context->relaction->mas_action->commandType == CMD_UPDATE) ?
984 749 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
985 : else
377 alvherre 986 GIC 5960871 : wco_kind = WCO_RLS_INSERT_CHECK;
987 :
988 : /*
989 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
990 : * we are looking for at this point.
991 : */
2907 sfrost 992 5961960 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1906 rhaas 993 261 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
994 :
995 : /*
996 : * Check the constraints of the tuple.
997 : */
1763 alvherre 998 5961876 : if (resultRelationDesc->rd_att->constr)
1763 alvherre 999 CBC 1772995 : ExecConstraints(resultRelInfo, slot, estate);
2132 rhaas 1000 ECB :
1763 alvherre 1001 : /*
1002 : * Also check the tuple against the partition constraint, if there is
1003 : * one; except that if we got here via tuple-routing, we don't need to
1004 : * if there's no BR trigger defined on the partition.
1005 : */
935 tgl 1006 GIC 5961598 : if (resultRelationDesc->rd_rel->relispartition &&
790 heikki.linnakangas 1007 362448 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1763 alvherre 1008 360075 : (resultRelInfo->ri_TrigDesc &&
1009 634 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1010 2465 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
4564 tgl 1011 ECB :
2893 andres 1012 CBC 5961514 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
2893 andres 1013 GIC 2005 : {
1014 : /* Perform a speculative insertion. */
1015 : uint32 specToken;
1016 : ItemPointerData conflictTid;
2893 andres 1017 ECB : bool specConflict;
1847 alvherre 1018 : List *arbiterIndexes;
1019 :
1840 alvherre 1020 GIC 4690 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1021 :
1022 : /*
1023 : * Do a non-conclusive check for conflicts first.
1024 : *
2893 andres 1025 ECB : * We're not holding any locks yet, so this doesn't guarantee that
1026 : * the later insert won't conflict. But it avoids leaving behind
1027 : * a lot of canceled speculative insertions, if you run a lot of
1028 : * INSERT ON CONFLICT statements that do conflict.
1029 : *
1030 : * We loop back here if we find a conflict below, either during
1031 : * the pre-check, or when we re-check after inserting the tuple
248 tgl 1032 : * speculatively. Better allow interrupts in case some bug makes
1033 : * this an infinite loop.
1034 : */
2893 andres 1035 GIC 5 : vlock:
248 tgl 1036 4695 : CHECK_FOR_INTERRUPTS();
2893 andres 1037 4695 : specConflict = false;
907 heikki.linnakangas 1038 4695 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
907 heikki.linnakangas 1039 ECB : &conflictTid, arbiterIndexes))
1040 : {
1041 : /* committed conflict tuple found */
2893 andres 1042 GIC 2679 : if (onconflict == ONCONFLICT_UPDATE)
1043 : {
1044 : /*
1045 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1046 : * part. Be prepared to retry if the UPDATE fails because
1047 : * of another concurrent UPDATE/DELETE to the conflict
1048 : * tuple.
1049 : */
1050 2600 : TupleTableSlot *returning = NULL;
1051 :
388 alvherre 1052 2600 : if (ExecOnConflictUpdate(context, resultRelInfo,
1053 : &conflictTid, slot, canSetTag,
388 alvherre 1054 ECB : &returning))
2893 andres 1055 : {
1825 alvherre 1056 CBC 2561 : InstrCountTuples2(&mtstate->ps, 1);
2893 andres 1057 2561 : return returning;
1058 : }
1059 : else
2893 andres 1060 UIC 0 : goto vlock;
2893 andres 1061 ECB : }
1062 : else
1063 : {
1064 : /*
1065 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1066 : * verify that the tuple is visible to the executor's MVCC
1067 : * snapshot at higher isolation levels.
1068 : *
1478 1069 : * Using ExecGetReturningSlot() to store the tuple for the
1070 : * recheck isn't that pretty, but we can't trivially use
1071 : * the input slot, because it might not be of a compatible
1072 : * type. As there's no conflicting usage of
1073 : * ExecGetReturningSlot() in the DO NOTHING case...
1074 : */
2893 andres 1075 CBC 79 : Assert(onconflict == ONCONFLICT_NOTHING);
1478 1076 79 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1077 : ExecGetReturningSlot(estate, resultRelInfo));
1825 alvherre 1078 GIC 69 : InstrCountTuples2(&mtstate->ps, 1);
2893 andres 1079 GBC 69 : return NULL;
1080 : }
1081 : }
1082 :
1083 : /*
1084 : * Before we start insertion proper, acquire our "speculative
1085 : * insertion lock". Others can use that to wait for us to decide
1086 : * if we're going to go ahead with the insertion, instead of
1087 : * waiting for the whole transaction to complete.
1088 : */
2893 andres 1089 GIC 2013 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1090 :
1091 : /* insert the tuple, with the speculative token */
1417 1092 2013 : table_tuple_insert_speculative(resultRelationDesc, slot,
1093 : estate->es_output_cid,
1417 andres 1094 ECB : 0,
1095 : NULL,
1096 : specToken);
2893 1097 :
1098 : /* insert index entries for tuple */
907 heikki.linnakangas 1099 GIC 2013 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1100 : slot, estate, false, true,
1101 : &specConflict,
1102 : arbiterIndexes,
1103 : false);
1104 :
1105 : /* adjust the tuple's state accordingly */
1417 andres 1106 2010 : table_tuple_complete_speculative(resultRelationDesc, slot,
1107 2010 : specToken, !specConflict);
1108 :
2893 andres 1109 ECB : /*
1110 : * Wake up anyone waiting for our decision. They will re-check
1111 : * the tuple, see that it's no longer speculative, and wait on our
1112 : * XID as if this was a regularly inserted tuple all along. Or if
1113 : * we killed the tuple, they will see it's dead, and proceed as if
1114 : * the tuple never existed.
1115 : */
2893 andres 1116 GIC 2010 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1117 :
1118 : /*
2893 andres 1119 ECB : * If there was a conflict, start from the beginning. We'll do
1120 : * the pre-check again, which will now find the conflicting tuple
1121 : * (unless it aborts before we get there).
1122 : */
2893 andres 1123 GIC 2010 : if (specConflict)
1124 : {
1125 5 : list_free(recheckIndexes);
2893 andres 1126 CBC 5 : goto vlock;
2893 andres 1127 ECB : }
1128 :
1129 : /* Since there was no insertion conflict, we're done */
1130 : }
1131 : else
1132 : {
1133 : /* insert the tuple normally */
1417 andres 1134 GIC 5956824 : table_tuple_insert(resultRelationDesc, slot,
1135 : estate->es_output_cid,
1417 andres 1136 ECB : 0, NULL);
1137 :
1138 : /* insert index entries for tuple */
2893 andres 1139 GIC 5956812 : if (resultRelInfo->ri_NumIndices > 0)
907 heikki.linnakangas 1140 1620114 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1141 : slot, estate, false,
1142 : false, NULL, NIL,
1143 : false);
2893 andres 1144 ECB : }
1145 : }
4929 tgl 1146 :
4426 tgl 1147 CBC 5959482 : if (canSetTag)
4426 tgl 1148 GIC 5958877 : (estate->es_processed)++;
1149 :
1150 : /*
1151 : * If this insert is the result of a partition key update that moved the
1152 : * tuple to a new partition, put this row into the transition NEW TABLE,
1153 : * if there is one. We need to do this separately for DELETE and INSERT
1154 : * because they happen on different tables.
1906 rhaas 1155 ECB : */
1906 rhaas 1156 GIC 5959482 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1157 5959482 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1158 21 : && mtstate->mt_transition_capture->tcs_update_new_table)
1159 : {
385 alvherre 1160 CBC 21 : ExecARUpdateTriggers(estate, resultRelInfo,
385 alvherre 1161 ECB : NULL, NULL,
1162 : NULL,
1163 : NULL,
1164 : slot,
1165 : NULL,
385 alvherre 1166 GIC 21 : mtstate->mt_transition_capture,
1167 : false);
1906 rhaas 1168 ECB :
1169 : /*
1170 : * We've already captured the NEW TABLE row, so make sure any AR
1171 : * INSERT trigger fired below doesn't capture it again.
1172 : */
1906 rhaas 1173 GIC 21 : ar_insert_trig_tcs = NULL;
1174 : }
1175 :
1176 : /* AFTER ROW INSERT Triggers */
1503 andres 1177 CBC 5959482 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1906 rhaas 1178 ECB : ar_insert_trig_tcs);
4929 tgl 1179 :
4816 tgl 1180 GIC 5959482 : list_free(recheckIndexes);
4816 tgl 1181 ECB :
1182 : /*
1183 : * Check any WITH CHECK OPTION constraints from parent views. We are
1184 : * required to do this after testing all constraints and uniqueness
1185 : * violations per the SQL spec, so we do it after actually inserting the
1186 : * record into the heap and all indexes.
2907 sfrost 1187 : *
1188 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1189 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1190 : *
1191 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1192 : * are looking for at this point.
1193 : */
3552 sfrost 1194 CBC 5959482 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2907 sfrost 1195 GIC 161 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1196 :
1197 : /* Process RETURNING if present */
4929 tgl 1198 CBC 5959415 : if (resultRelInfo->ri_projectReturning)
2271 rhaas 1199 GIC 1663 : result = ExecProcessReturning(resultRelInfo, slot, planSlot);
1200 :
385 alvherre 1201 CBC 5959409 : if (inserted_tuple)
385 alvherre 1202 GIC 338 : *inserted_tuple = slot;
1203 5959409 : if (insert_destrel)
1204 338 : *insert_destrel = resultRelInfo;
1205 :
2271 rhaas 1206 5959409 : return result;
1207 : }
1208 :
1209 : /* ----------------------------------------------------------------
1210 : * ExecBatchInsert
1211 : *
1212 : * Insert multiple tuples in an efficient way.
1213 : * Currently, this handles inserting into a foreign table without
1214 : * RETURNING clause.
809 tomas.vondra 1215 ECB : * ----------------------------------------------------------------
1216 : */
1217 : static void
809 tomas.vondra 1218 GIC 27 : ExecBatchInsert(ModifyTableState *mtstate,
697 tgl 1219 ECB : ResultRelInfo *resultRelInfo,
1220 : TupleTableSlot **slots,
1221 : TupleTableSlot **planSlots,
1222 : int numSlots,
1223 : EState *estate,
1224 : bool canSetTag)
809 tomas.vondra 1225 : {
1226 : int i;
809 tomas.vondra 1227 CBC 27 : int numInserted = numSlots;
809 tomas.vondra 1228 GIC 27 : TupleTableSlot *slot = NULL;
1229 : TupleTableSlot **rslots;
1230 :
1231 : /*
1232 : * insert into foreign table: let the FDW do it
1233 : */
1234 27 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1235 : resultRelInfo,
1236 : slots,
1237 : planSlots,
1238 : &numInserted);
809 tomas.vondra 1239 ECB :
809 tomas.vondra 1240 GIC 70147 : for (i = 0; i < numInserted; i++)
1241 : {
1242 70120 : slot = rslots[i];
1243 :
1244 : /*
1245 : * AFTER ROW Triggers might reference the tableoid column, so
1246 : * (re-)initialize tts_tableOid before evaluating them.
1247 : */
809 tomas.vondra 1248 CBC 70120 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
809 tomas.vondra 1249 ECB :
1250 : /* AFTER ROW INSERT Triggers */
809 tomas.vondra 1251 GIC 70120 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1252 70120 : mtstate->mt_transition_capture);
1253 :
1254 : /*
809 tomas.vondra 1255 ECB : * Check any WITH CHECK OPTION constraints from parent views. See the
1256 : * comment in ExecInsert.
1257 : */
809 tomas.vondra 1258 GIC 70120 : if (resultRelInfo->ri_WithCheckOptions != NIL)
809 tomas.vondra 1259 UIC 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1260 : }
809 tomas.vondra 1261 ECB :
809 tomas.vondra 1262 GIC 27 : if (canSetTag && numInserted > 0)
809 tomas.vondra 1263 CBC 27 : estate->es_processed += numInserted;
809 tomas.vondra 1264 GIC 27 : }
1265 :
1266 : /*
1267 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1268 : */
135 efujita 1269 ECB : static void
135 efujita 1270 GIC 17 : ExecPendingInserts(EState *estate)
1271 : {
122 efujita 1272 ECB : ListCell *l1,
1273 : *l2;
1274 :
122 efujita 1275 GIC 35 : forboth(l1, estate->es_insert_pending_result_relations,
1276 : l2, estate->es_insert_pending_modifytables)
1277 : {
1278 18 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
122 efujita 1279 CBC 18 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
135 efujita 1280 EUB :
135 efujita 1281 GIC 18 : Assert(mtstate);
1282 18 : ExecBatchInsert(mtstate, resultRelInfo,
135 efujita 1283 ECB : resultRelInfo->ri_Slots,
1284 : resultRelInfo->ri_PlanSlots,
1285 : resultRelInfo->ri_NumSlots,
135 efujita 1286 GIC 18 : estate, mtstate->canSetTag);
1287 18 : resultRelInfo->ri_NumSlots = 0;
1288 : }
1289 :
1290 17 : list_free(estate->es_insert_pending_result_relations);
122 efujita 1291 CBC 17 : list_free(estate->es_insert_pending_modifytables);
135 efujita 1292 GIC 17 : estate->es_insert_pending_result_relations = NIL;
122 1293 17 : estate->es_insert_pending_modifytables = NIL;
135 1294 17 : }
1295 :
388 alvherre 1296 ECB : /*
1297 : * ExecDeletePrologue -- subroutine for ExecDelete
1298 : *
1299 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1300 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1301 : * the delete a no-op; otherwise, return true.
1302 : */
1303 : static bool
388 alvherre 1304 GIC 843686 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1305 : ItemPointer tupleid, HeapTuple oldtuple,
1306 : TupleTableSlot **epqreturnslot, TM_Result *result)
388 alvherre 1307 ECB : {
27 dean.a.rasheed 1308 CBC 843686 : if (result)
27 dean.a.rasheed 1309 GIC 131 : *result = TM_Ok;
1310 :
388 alvherre 1311 ECB : /* BEFORE ROW DELETE triggers */
388 alvherre 1312 CBC 843686 : if (resultRelInfo->ri_TrigDesc &&
1313 3392 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
135 efujita 1314 ECB : {
1315 : /* Flush any pending inserts, so rows are visible to the triggers */
135 efujita 1316 GIC 170 : if (context->estate->es_insert_pending_result_relations != NIL)
1317 1 : ExecPendingInserts(context->estate);
1318 :
388 alvherre 1319 GNC 170 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1320 : resultRelInfo, tupleid, oldtuple,
1321 : epqreturnslot, result, &context->tmfd);
1322 : }
1323 :
388 alvherre 1324 GIC 843516 : return true;
388 alvherre 1325 ECB : }
1326 :
1327 : /*
1328 : * ExecDeleteAct -- subroutine for ExecDelete
1329 : *
1330 : * Actually delete the tuple from a plain table.
1331 : *
1332 : * Caller is in charge of doing EvalPlanQual as necessary
1333 : */
1334 : static TM_Result
388 alvherre 1335 GIC 843607 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1336 : ItemPointer tupleid, bool changingPart)
388 alvherre 1337 ECB : {
388 alvherre 1338 CBC 843607 : EState *estate = context->estate;
1339 :
1340 843607 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1341 : estate->es_output_cid,
1342 : estate->es_snapshot,
1343 : estate->es_crosscheck_snapshot,
1344 : true /* wait for commit */ ,
388 alvherre 1345 ECB : &context->tmfd,
1346 : changingPart);
1347 : }
1348 :
1349 : /*
1350 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1351 : *
1352 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1353 : * including the UPDATE triggers if the deletion is being done as part of a
1354 : * cross-partition tuple move.
1355 : */
1356 : static void
388 alvherre 1357 GIC 843591 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1358 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
388 alvherre 1359 ECB : {
388 alvherre 1360 GIC 843591 : ModifyTableState *mtstate = context->mtstate;
388 alvherre 1361 CBC 843591 : EState *estate = context->estate;
1362 : TransitionCaptureState *ar_delete_trig_tcs;
1363 :
1364 : /*
1365 : * If this delete is the result of a partition key update that moved the
1366 : * tuple to a new partition, put this row into the transition OLD TABLE,
1367 : * if there is one. We need to do this separately for DELETE and INSERT
1368 : * because they happen on different tables.
1369 : */
388 alvherre 1370 GIC 843591 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1371 843591 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1372 21 : mtstate->mt_transition_capture->tcs_update_old_table)
1373 : {
385 1374 21 : ExecARUpdateTriggers(estate, resultRelInfo,
1375 : NULL, NULL,
1376 : tupleid, oldtuple,
1377 21 : NULL, NULL, mtstate->mt_transition_capture,
385 alvherre 1378 ECB : false);
1379 :
1380 : /*
158 efujita 1381 : * We've already captured the OLD TABLE row, so make sure any AR
388 alvherre 1382 : * DELETE trigger fired below doesn't capture it again.
1383 : */
388 alvherre 1384 GIC 21 : ar_delete_trig_tcs = NULL;
1385 : }
1386 :
1387 : /* AFTER ROW DELETE Triggers */
1388 843591 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1389 : ar_delete_trig_tcs, changingPart);
1390 843591 : }
388 alvherre 1391 ECB :
4929 tgl 1392 : /* ----------------------------------------------------------------
1393 : * ExecDelete
1394 : *
1395 : * DELETE is like UPDATE, except that we delete the tuple and no
1396 : * index modifications are needed.
1397 : *
4564 1398 : * When deleting from a table, tupleid identifies the tuple to
1399 : * delete and oldtuple is NULL. When deleting from a view,
1400 : * oldtuple is passed to the INSTEAD OF triggers and identifies
1401 : * what to delete, and tupleid is invalid. When deleting from a
1402 : * foreign table, tupleid is invalid; the FDW has to figure out
1403 : * which row to delete using data from the planSlot. oldtuple is
1404 : * passed to foreign table triggers; it is NULL when the foreign
1732 akapila 1405 : * table has no relevant triggers. We use tupleDeleted to indicate
1406 : * whether the tuple is actually deleted, callers can use it to
1407 : * decide whether to continue the operation. When this DELETE is a
1408 : * part of an UPDATE of partition-key, then the slot returned by
388 alvherre 1409 : * EvalPlanQual() is passed back using output parameter epqreturnslot.
1410 : *
4929 tgl 1411 : * Returns RETURNING result if any, otherwise NULL.
1412 : * ----------------------------------------------------------------
1413 : */
1414 : static TupleTableSlot *
388 alvherre 1415 GIC 843555 : ExecDelete(ModifyTableContext *context,
1416 : ResultRelInfo *resultRelInfo,
1417 : ItemPointer tupleid,
1418 : HeapTuple oldtuple,
1419 : bool processReturning,
1420 : bool changingPart,
1421 : bool canSetTag,
1422 : bool *tupleDeleted,
1423 : TupleTableSlot **epqreturnslot)
1424 : {
1425 843555 : EState *estate = context->estate;
907 heikki.linnakangas 1426 843555 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
3682 tgl 1427 843555 : TupleTableSlot *slot = NULL;
1428 : TM_Result result;
1429 :
1906 rhaas 1430 843555 : if (tupleDeleted)
1431 431 : *tupleDeleted = false;
1432 :
1433 : /*
1434 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1435 : * done if it says we are.
388 alvherre 1436 ECB : */
388 alvherre 1437 GIC 843555 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1438 : epqreturnslot, NULL))
1439 14 : return NULL;
1440 :
1441 : /* INSTEAD OF ROW DELETE Triggers */
4564 tgl 1442 843524 : if (resultRelInfo->ri_TrigDesc &&
1443 3342 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
4929 1444 24 : {
1445 : bool dodelete;
4564 tgl 1446 ECB :
4564 tgl 1447 CBC 27 : Assert(oldtuple != NULL);
3304 noah 1448 27 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1449 :
4564 tgl 1450 GIC 27 : if (!dodelete) /* "do nothing" */
4929 tgl 1451 CBC 3 : return NULL;
4564 tgl 1452 ECB : }
3682 tgl 1453 GIC 843497 : else if (resultRelInfo->ri_FdwRoutine)
1454 : {
1455 : /*
1456 : * delete from foreign table: let the FDW do it
1457 : *
1503 andres 1458 ECB : * We offer the returning slot as a place to store RETURNING data,
1459 : * although the FDW can return some other slot if it wants.
3682 tgl 1460 : */
1503 andres 1461 GIC 17 : slot = ExecGetReturningSlot(estate, resultRelInfo);
3682 tgl 1462 17 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
3682 tgl 1463 ECB : resultRelInfo,
1464 : slot,
388 alvherre 1465 : context->planSlot);
1466 :
3682 tgl 1467 GIC 17 : if (slot == NULL) /* "do nothing" */
3682 tgl 1468 LBC 0 : return NULL;
2621 rhaas 1469 ECB :
1470 : /*
1471 : * RETURNING expressions might reference the tableoid column, so
1503 andres 1472 : * (re)initialize tts_tableOid before evaluating them.
1473 : */
1637 andres 1474 CBC 17 : if (TTS_EMPTY(slot))
2621 rhaas 1475 GIC 3 : ExecStoreAllNullTuple(slot);
1476 :
1503 andres 1477 17 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1478 : }
1479 : else
1480 : {
1481 : /*
4564 tgl 1482 ECB : * delete the tuple
1483 : *
1484 : * Note: if context->estate->es_crosscheck_snapshot isn't
1485 : * InvalidSnapshot, we check that the row to be deleted is visible to
1486 : * that snapshot, and throw a can't-serialize error if not. This is a
1487 : * special-case behavior needed for referential integrity updates in
388 alvherre 1488 : * transaction-snapshot mode transactions.
4564 tgl 1489 EUB : */
181 john.naylor 1490 GNC 843480 : ldelete:
6 akorotkov 1491 GIC 843482 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1492 :
4564 tgl 1493 843464 : switch (result)
1494 : {
1478 andres 1495 CBC 15 : case TM_SelfModified:
3602 bruce 1496 ECB :
1497 : /*
3817 kgrittn 1498 : * The target tuple was already updated or deleted by the
1499 : * current command, or by a later command in the current
1500 : * transaction. The former case is possible in a join DELETE
1501 : * where multiple tuples join to the same target tuple. This
1502 : * is somewhat questionable, but Postgres has always allowed
1503 : * it: we just ignore additional deletion attempts.
1504 : *
1505 : * The latter case arises if the tuple is modified by a
1506 : * command in a BEFORE trigger, or perhaps by a command in a
1507 : * volatile function used in the query. In such situations we
1508 : * should not ignore the deletion, but it is equally unsafe to
1509 : * proceed. We don't want to discard the original DELETE
1510 : * while keeping the triggered actions based on its deletion;
1511 : * and it would be no better to allow the original DELETE
3260 bruce 1512 : * while discarding updates that it triggered. The row update
1513 : * carries some information that might be important according
3817 kgrittn 1514 : * to business rules; so throwing an error is the only safe
1515 : * course.
1516 : *
1517 : * If a trigger actually intends this type of interaction, it
1518 : * can re-execute the DELETE and then return NULL to cancel
1519 : * the outer delete.
1520 : */
388 alvherre 1521 GIC 15 : if (context->tmfd.cmax != estate->es_output_cid)
3817 kgrittn 1522 3 : ereport(ERROR,
1523 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1524 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1525 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1526 :
1527 : /* Else, already deleted by self; nothing to do */
4564 tgl 1528 12 : return NULL;
1529 :
1478 andres 1530 843431 : case TM_Ok:
4564 tgl 1531 843431 : break;
1532 :
1478 andres 1533 15 : case TM_Updated:
1534 : {
1535 : TupleTableSlot *inputslot;
1536 : TupleTableSlot *epqslot;
1537 :
1538 15 : if (IsolationUsesXactSnapshot())
1478 andres 1539 UIC 0 : ereport(ERROR,
1540 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1541 : errmsg("could not serialize access due to concurrent update")));
1478 andres 1542 ECB :
1543 : /*
1544 : * Already know that we're going to need to do EPQ, so
1545 : * fetch tuple directly into the right slot.
1546 : */
388 alvherre 1547 GIC 15 : EvalPlanQualBegin(context->epqstate);
1548 15 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1478 andres 1549 ECB : resultRelInfo->ri_RangeTableIndex);
1550 :
6 akorotkov 1551 CBC 15 : result = table_tuple_lock(resultRelationDesc, tupleid,
6 akorotkov 1552 ECB : estate->es_snapshot,
1553 : inputslot, estate->es_output_cid,
1554 : LockTupleExclusive, LockWaitBlock,
1555 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1556 : &context->tmfd);
1557 :
6 akorotkov 1558 GIC 13 : switch (result)
4564 tgl 1559 ECB : {
6 akorotkov 1560 GBC 10 : case TM_Ok:
6 akorotkov 1561 GIC 10 : Assert(context->tmfd.traversed);
1562 10 : epqslot = EvalPlanQual(context->epqstate,
1563 : resultRelationDesc,
1564 : resultRelInfo->ri_RangeTableIndex,
1565 : inputslot);
1566 10 : if (TupIsNull(epqslot))
1567 : /* Tuple not passing quals anymore, exiting... */
6 akorotkov 1568 CBC 2 : return NULL;
6 akorotkov 1569 ECB :
1570 : /*
1571 : * If requested, skip delete and pass back the
1572 : * updated row.
1573 : */
6 akorotkov 1574 GIC 8 : if (epqreturnslot)
1575 : {
1576 6 : *epqreturnslot = epqslot;
1577 6 : return NULL;
1578 : }
6 akorotkov 1579 ECB : else
6 akorotkov 1580 GIC 2 : goto ldelete;
1478 andres 1581 ECB :
6 akorotkov 1582 CBC 2 : case TM_SelfModified:
1478 andres 1583 ECB :
1584 : /*
1585 : * This can be reached when following an update
1586 : * chain from a tuple updated by another session,
6 akorotkov 1587 : * reaching a tuple that was already updated in
1588 : * this transaction. If previously updated by this
1589 : * command, ignore the delete, otherwise error
1590 : * out.
1591 : *
1592 : * See also TM_SelfModified response to
1593 : * table_tuple_delete() above.
1594 : */
6 akorotkov 1595 CBC 2 : if (context->tmfd.cmax != estate->es_output_cid)
6 akorotkov 1596 GIC 1 : ereport(ERROR,
6 akorotkov 1597 ECB : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1598 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1599 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
6 akorotkov 1600 GIC 1 : return NULL;
6 akorotkov 1601 ECB :
6 akorotkov 1602 GIC 1 : case TM_Deleted:
6 akorotkov 1603 ECB : /* tuple already deleted; nothing to do */
6 akorotkov 1604 GIC 1 : return NULL;
1605 :
6 akorotkov 1606 UIC 0 : default:
1607 :
1608 : /*
1609 : * TM_Invisible should be impossible because we're
1610 : * waiting for updated row versions, and would
1611 : * already have errored out if the first version
1612 : * is invisible.
1613 : *
1614 : * TM_Updated should be impossible, because we're
1615 : * locking the latest version via
6 akorotkov 1616 ECB : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1617 : */
6 akorotkov 1618 UIC 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1619 : result);
1620 : return NULL;
4564 tgl 1621 ECB : }
1622 :
6 akorotkov 1623 : Assert(false);
1624 : break;
4929 tgl 1625 : }
1626 :
1478 andres 1627 GBC 3 : case TM_Deleted:
1478 andres 1628 GIC 3 : if (IsolationUsesXactSnapshot())
1478 andres 1629 UIC 0 : ereport(ERROR,
1630 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1631 : errmsg("could not serialize access due to concurrent delete")));
1632 : /* tuple already deleted; nothing to do */
4564 tgl 1633 GIC 3 : return NULL;
1634 :
4564 tgl 1635 UIC 0 : default:
1417 andres 1636 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1637 : result);
1638 : return NULL;
4564 tgl 1639 EUB : }
1640 :
1641 : /*
1642 : * Note: Normally one would think that we have to delete index tuples
1643 : * associated with the heap tuple now...
1644 : *
1645 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1646 : * take care of it later. We can't delete index tuples immediately
1647 : * anyway, since the tuple is still visible to other transactions.
4564 tgl 1648 ECB : */
4929 1649 : }
4929 tgl 1650 EUB :
4426 tgl 1651 GIC 843472 : if (canSetTag)
1652 842954 : (estate->es_processed)++;
1653 :
1906 rhaas 1654 ECB : /* Tell caller that the delete actually happened. */
1906 rhaas 1655 GIC 843472 : if (tupleDeleted)
1906 rhaas 1656 GBC 404 : *tupleDeleted = true;
1906 rhaas 1657 EUB :
385 alvherre 1658 GIC 843472 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1659 :
1660 : /* Process RETURNING if present and if requested */
1906 rhaas 1661 843472 : if (processReturning && resultRelInfo->ri_projectReturning)
1662 : {
1663 : /*
1664 : * We have to put the target tuple into a slot, which means first we
1665 : * gotta fetch it. We can use the trigger tuple slot.
1666 : */
1667 : TupleTableSlot *rslot;
1668 :
3682 tgl 1669 436 : if (resultRelInfo->ri_FdwRoutine)
1670 : {
1671 : /* FDW must have provided a slot containing the deleted row */
3682 tgl 1672 CBC 3 : Assert(!TupIsNull(slot));
4564 tgl 1673 ECB : }
1674 : else
1675 : {
1503 andres 1676 CBC 433 : slot = ExecGetReturningSlot(estate, resultRelInfo);
3682 tgl 1677 433 : if (oldtuple != NULL)
1678 : {
1451 andres 1679 12 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1680 : }
1681 : else
3682 tgl 1682 ECB : {
1417 andres 1683 GIC 421 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1684 : SnapshotAny, slot))
3682 tgl 1685 UIC 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1686 : }
1687 : }
1688 :
388 alvherre 1689 GIC 436 : rslot = ExecProcessReturning(resultRelInfo, slot, context->planSlot);
4929 tgl 1690 ECB :
1691 : /*
1692 : * Before releasing the target tuple again, make sure rslot has a
3682 1693 : * local copy of any pass-by-reference values.
1694 : */
3682 tgl 1695 GIC 436 : ExecMaterializeSlot(rslot);
1696 :
4929 tgl 1697 CBC 436 : ExecClearTuple(slot);
4929 tgl 1698 ECB :
4929 tgl 1699 GIC 436 : return rslot;
4929 tgl 1700 ECB : }
1701 :
4929 tgl 1702 GIC 843036 : return NULL;
1703 : }
4929 tgl 1704 ECB :
1705 : /*
906 heikki.linnakangas 1706 EUB : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1707 : *
1708 : * This works by first deleting the old tuple from the current partition,
1709 : * followed by inserting the new tuple into the root parent table, that is,
906 heikki.linnakangas 1710 ECB : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1711 : * correct partition.
1712 : *
1713 : * Returns true if the tuple has been successfully moved, or if it's found
1714 : * that the tuple was concurrently deleted so there's nothing more to do
1715 : * for the caller.
1716 : *
1717 : * False is returned if the tuple we're trying to move is found to have been
386 alvherre 1718 : * concurrently updated. In that case, the caller must check if the updated
1719 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
27 dean.a.rasheed 1720 : * this function again or perform a regular update accordingly. For MERGE,
1721 : * the updated tuple is not returned in *retry_slot; it has its own retry
1722 : * logic.
906 heikki.linnakangas 1723 : */
static bool
ExecCrossPartitionUpdate(ModifyTableContext *context,
						 ResultRelInfo *resultRelInfo,
						 ItemPointer tupleid, HeapTuple oldtuple,
						 TupleTableSlot *slot,
						 bool canSetTag,
						 UpdateContext *updateCxt,
						 TupleTableSlot **retry_slot,
						 TupleTableSlot **inserted_tuple,
						 ResultRelInfo **insert_destrel)
{
	ModifyTableState *mtstate = context->mtstate;
	EState	   *estate = mtstate->ps.state;
	TupleConversionMap *tupconv_map;
	bool		tuple_deleted;
	TupleTableSlot *epqslot = NULL;

	/*
	 * Output parameters: *retry_slot is set only when a concurrent update
	 * forces a plain-UPDATE retry (see below); *inserted_tuple and
	 * *insert_destrel are filled in by ExecInsert() with the tuple as
	 * inserted and the destination partition, for the caller's AFTER-trigger
	 * bookkeeping.  The RETURNING slot (if any) is passed back via
	 * context->cpUpdateReturningSlot.
	 */
	context->cpUpdateReturningSlot = NULL;
	*retry_slot = NULL;

	/*
	 * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
	 * to migrate to a different partition.  Maybe this can be implemented
	 * some day, but it seems a fringe feature with little redeeming value.
	 */
	if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("invalid ON UPDATE specification"),
				 errdetail("The result tuple would appear in a different partition than the original tuple.")));

	/*
	 * When an UPDATE is run directly on a leaf partition, simply fail with a
	 * partition constraint violation error.
	 */
	if (resultRelInfo == mtstate->rootResultRelInfo)
		ExecPartitionCheckEmitError(resultRelInfo, slot, estate);

	/*
	 * Initialize tuple routing info if not already done.  This is set up
	 * once per query and cached in mtstate for subsequent row movements.
	 */
	if (mtstate->mt_partition_tuple_routing == NULL)
	{
		Relation	rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
		MemoryContext oldcxt;

		/* Things built here have to last for the query duration. */
		oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);

		mtstate->mt_partition_tuple_routing =
			ExecSetupPartitionTupleRouting(estate, rootRel);

		/*
		 * Before a partition's tuple can be re-routed, it must first be
		 * converted to the root's format, so we'll need a slot for storing
		 * such tuples.
		 */
		Assert(mtstate->mt_root_tuple_slot == NULL);
		mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);

		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * Row movement, part 1.  Delete the tuple, but skip RETURNING processing.
	 * We want to return rows from INSERT.  On a concurrent update, epqslot
	 * receives the re-fetched row version (see below).
	 */
	ExecDelete(context, resultRelInfo,
			   tupleid, oldtuple,
			   false,			/* processReturning */
			   true,			/* changingPart */
			   false,			/* canSetTag */
			   &tuple_deleted, &epqslot);

	/*
	 * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
	 * it was already deleted by self, or it was concurrently deleted by
	 * another transaction), then we should skip the insert as well;
	 * otherwise, an UPDATE could cause an increase in the total number of
	 * rows across all partitions, which is clearly wrong.
	 *
	 * For a normal UPDATE, the case where the tuple has been the subject of a
	 * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
	 * machinery, but for an UPDATE that we've translated into a DELETE from
	 * this partition and an INSERT into some other partition, that's not
	 * available, because CTID chains can't span relation boundaries.  We
	 * mimic the semantics to a limited extent by skipping the INSERT if the
	 * DELETE fails to find a tuple.  This ensures that two concurrent
	 * attempts to UPDATE the same tuple at the same time can't turn one tuple
	 * into two, and that an UPDATE of a just-deleted tuple can't resurrect
	 * it.
	 */
	if (!tuple_deleted)
	{
		/*
		 * epqslot will be typically NULL.  But when ExecDelete() finds that
		 * another transaction has concurrently updated the same row, it
		 * re-fetches the row, skips the delete, and epqslot is set to the
		 * re-fetched tuple slot.  In that case, we need to do all the checks
		 * again.  For MERGE, we leave everything to the caller (it must do
		 * additional rechecking, and might end up executing a different
		 * action entirely).  A non-NULL relaction identifies the MERGE case.
		 */
		if (context->relaction != NULL)
			return false;
		else if (TupIsNull(epqslot))
			return true;
		else
		{
			/* Fetch the most recent version of old tuple. */
			TupleTableSlot *oldSlot;

			/* ... but first, make sure ri_oldTupleSlot is initialized. */
			if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
				ExecInitUpdateProjection(mtstate, resultRelInfo);
			oldSlot = resultRelInfo->ri_oldTupleSlot;
			if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
											   tupleid,
											   SnapshotAny,
											   oldSlot))
				elog(ERROR, "failed to fetch tuple being updated");
			/* and project the new tuple to retry the UPDATE with */
			*retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
												oldSlot);
			return false;
		}
	}

	/*
	 * resultRelInfo is one of the per-relation resultRelInfos.  So we should
	 * convert the tuple into root's tuple descriptor if needed, since
	 * ExecInsert() starts the search from root.
	 */
	tupconv_map = ExecGetChildToRootMap(resultRelInfo);
	if (tupconv_map != NULL)
		slot = execute_attr_map_slot(tupconv_map->attrMap,
									 slot,
									 mtstate->mt_root_tuple_slot);

	/* Tuple routing starts from the root table. */
	context->cpUpdateReturningSlot =
		ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
				   inserted_tuple, insert_destrel);

	/*
	 * Reset the transition state that may possibly have been written by
	 * INSERT.
	 */
	if (mtstate->mt_transition_capture)
		mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;

	/* We're done moving. */
	return true;
}
906 heikki.linnakangas 1876 ECB :
388 alvherre 1877 : /*
1878 : * ExecUpdatePrologue -- subroutine for ExecUpdate
1879 : *
1880 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
1881 : * triggers. We return false if one of them makes the update a no-op;
1882 : * otherwise, return true.
1883 : */
1884 : static bool
388 alvherre 1885 GIC 186549 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1886 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1887 : TM_Result *result)
1888 : {
1889 186549 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1890 :
27 dean.a.rasheed 1891 CBC 186549 : if (result)
1892 726 : *result = TM_Ok;
1893 :
388 alvherre 1894 GIC 186549 : ExecMaterializeSlot(slot);
388 alvherre 1895 ECB :
1896 : /*
1897 : * Open the table's indexes, if we have not done so already, so that we
1898 : * can add new index entries for the updated tuple.
1899 : */
388 alvherre 1900 GIC 186549 : if (resultRelationDesc->rd_rel->relhasindex &&
1901 139010 : resultRelInfo->ri_IndexRelationDescs == NULL)
1902 4647 : ExecOpenIndices(resultRelInfo, false);
1903 :
1904 : /* BEFORE ROW UPDATE triggers */
1905 186549 : if (resultRelInfo->ri_TrigDesc &&
388 alvherre 1906 CBC 2844 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
1907 : {
1908 : /* Flush any pending inserts, so rows are visible to the triggers */
135 efujita 1909 GIC 1265 : if (context->estate->es_insert_pending_result_relations != NIL)
135 efujita 1910 CBC 1 : ExecPendingInserts(context->estate);
1911 :
388 alvherre 1912 GNC 1265 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
1913 : resultRelInfo, tupleid, oldtuple, slot,
1914 : result, &context->tmfd);
135 efujita 1915 ECB : }
1916 :
388 alvherre 1917 GIC 185284 : return true;
1918 : }
1919 :
1920 : /*
34 tgl 1921 ECB : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
388 alvherre 1922 : *
1923 : * Apply the final modifications to the tuple slot before the update.
1924 : * (This is split out because we also need it in the foreign-table code path.)
1925 : */
1926 : static void
388 alvherre 1927 CBC 186445 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
1928 : TupleTableSlot *slot,
1929 : EState *estate)
388 alvherre 1930 ECB : {
388 alvherre 1931 CBC 186445 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1932 :
388 alvherre 1933 ECB : /*
1934 : * Constraints and GENERATED expressions might reference the tableoid
1935 : * column, so (re-)initialize tts_tableOid before evaluating them.
1936 : */
388 alvherre 1937 GIC 186445 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
388 alvherre 1938 ECB :
1939 : /*
1940 : * Compute stored generated columns
1941 : */
388 alvherre 1942 GIC 186445 : if (resultRelationDesc->rd_att->constr &&
1943 125541 : resultRelationDesc->rd_att->constr->has_generated_stored)
1944 121 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1945 : CMD_UPDATE);
1946 186445 : }
1947 :
388 alvherre 1948 ECB : /*
1949 : * ExecUpdateAct -- subroutine for ExecUpdate
1950 : *
1951 : * Actually update the tuple, when operating on a plain table. If the
1952 : * table is a partition, and the command was called referencing an ancestor
1953 : * partitioned table, this routine migrates the resulting tuple to another
1954 : * partition.
1955 : *
1956 : * The caller is in charge of keeping indexes current as necessary. The
1957 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
1958 : * be concurrently updated. However, in case of a cross-partition update,
1959 : * this routine does it.
1960 : */
static TM_Result
ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
			  ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
			  bool canSetTag, UpdateContext *updateCxt)
{
	EState	   *estate = context->estate;
	Relation	resultRelationDesc = resultRelInfo->ri_RelationDesc;
	bool		partition_constraint_failed;
	TM_Result	result;

	/*
	 * updateCxt is an output area: we set crossPartUpdate/updated here, and
	 * table_tuple_update() fills in lockmode/updateIndexes for the caller
	 * (see ExecUpdateEpilogue).
	 */
	updateCxt->crossPartUpdate = false;

	/*
	 * If we move the tuple to a new partition, we loop back here to recompute
	 * GENERATED values (which are allowed to be different across partitions)
	 * and recheck any RLS policies and constraints.  We do not fire any
	 * BEFORE triggers of the new partition, however.
	 */
lreplace:
	/* Fill in GENERATEd columns */
	ExecUpdatePrepareSlot(resultRelInfo, slot, estate);

	/* ensure slot is independent, consider e.g. EPQ */
	ExecMaterializeSlot(slot);

	/*
	 * If partition constraint fails, this row might get moved to another
	 * partition, in which case we should check the RLS CHECK policy just
	 * before inserting into the new partition, rather than doing it here.
	 * This is because a trigger on that partition might again change the row.
	 * So skip the WCO checks if the partition constraint fails.
	 */
	partition_constraint_failed =
		resultRelationDesc->rd_rel->relispartition &&
		!ExecPartitionCheck(resultRelInfo, slot, estate, false);

	/* Check any RLS UPDATE WITH CHECK policies */
	if (!partition_constraint_failed &&
		resultRelInfo->ri_WithCheckOptions != NIL)
	{
		/*
		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
		 * we are looking for at this point.
		 */
		ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
							 resultRelInfo, slot, estate);
	}

	/*
	 * If a partition check failed, try to move the row into the right
	 * partition.
	 */
	if (partition_constraint_failed)
	{
		TupleTableSlot *inserted_tuple,
				   *retry_slot;
		ResultRelInfo *insert_destrel = NULL;

		/*
		 * ExecCrossPartitionUpdate will first DELETE the row from the
		 * partition it's currently in and then insert it back into the root
		 * table, which will re-route it to the correct partition.  However,
		 * if the tuple has been concurrently updated, a retry is needed.
		 */
		if (ExecCrossPartitionUpdate(context, resultRelInfo,
									 tupleid, oldtuple, slot,
									 canSetTag, updateCxt,
									 &retry_slot,
									 &inserted_tuple,
									 &insert_destrel))
		{
			/* success! */
			updateCxt->updated = true;
			updateCxt->crossPartUpdate = true;

			/*
			 * If the partitioned table being updated is referenced in foreign
			 * keys, queue up trigger events to check that none of them were
			 * violated.  No special treatment is needed in
			 * non-cross-partition update situations, because the leaf
			 * partition's AR update triggers will take care of that.  During
			 * cross-partition updates implemented as delete on the source
			 * partition followed by insert on the destination partition,
			 * AR-UPDATE triggers of the root table (that is, the table
			 * mentioned in the query) must be fired.
			 *
			 * NULL insert_destrel means that the move failed to occur, that
			 * is, the update failed, so no need to anything in that case.
			 */
			if (insert_destrel &&
				resultRelInfo->ri_TrigDesc &&
				resultRelInfo->ri_TrigDesc->trig_update_after_row)
				ExecCrossPartitionUpdateForeignKey(context,
												   resultRelInfo,
												   insert_destrel,
												   tupleid, slot,
												   inserted_tuple);

			return TM_Ok;
		}

		/*
		 * No luck, a retry is needed.  If running MERGE, we do not do so
		 * here; instead let it handle that on its own rules.
		 * (context->relaction is non-NULL only for MERGE actions.)
		 */
		if (context->relaction != NULL)
			return TM_Updated;

		/*
		 * ExecCrossPartitionUpdate installed an updated version of the new
		 * tuple in the retry slot; start over.
		 */
		slot = retry_slot;
		goto lreplace;
	}

	/*
	 * Check the constraints of the tuple.  We've already checked the
	 * partition constraint above; however, we must still ensure the tuple
	 * passes all other constraints, so we will call ExecConstraints() and
	 * have it validate all remaining checks.
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a
	 * can't-serialize error if not.  This is a special-case behavior needed
	 * for referential integrity updates in transaction-snapshot mode
	 * transactions.
	 */
	result = table_tuple_update(resultRelationDesc, tupleid, slot,
								estate->es_output_cid,
								estate->es_snapshot,
								estate->es_crosscheck_snapshot,
								true /* wait for commit */ ,
								&context->tmfd, &updateCxt->lockmode,
								&updateCxt->updateIndexes);
	if (result == TM_Ok)
		updateCxt->updated = true;

	return result;
}
2107 :
2108 : /*
2109 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2110 : *
2111 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2112 : * returns indicating that the tuple was updated.
388 alvherre 2113 ECB : */
static void
ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
				   ResultRelInfo *resultRelInfo, ItemPointer tupleid,
				   HeapTuple oldtuple, TupleTableSlot *slot)
{
	ModifyTableState *mtstate = context->mtstate;
	List	   *recheckIndexes = NIL;

	/*
	 * insert index entries for tuple if necessary: the table AM told us via
	 * updateCxt->updateIndexes whether no index updates are needed (TU_None)
	 * or only summarizing indexes need updating (TU_Summarizing, passed as
	 * the onlySummarizing flag below).
	 */
	if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
		recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
											   slot, context->estate,
											   true, false,
											   NULL, NIL,
											   (updateCxt->updateIndexes == TU_Summarizing));

	/*
	 * AFTER ROW UPDATE Triggers.  When the ModifyTable's operation is
	 * CMD_INSERT, this update is the DO UPDATE arm of INSERT ... ON
	 * CONFLICT, so the ON-CONFLICT transition capture state is used instead
	 * of the ordinary one.
	 */
	ExecARUpdateTriggers(context->estate, resultRelInfo,
						 NULL, NULL,
						 tupleid, oldtuple, slot,
						 recheckIndexes,
						 mtstate->operation == CMD_INSERT ?
						 mtstate->mt_oc_transition_capture :
						 mtstate->mt_transition_capture,
						 false);

	list_free(recheckIndexes);

	/*
	 * Check any WITH CHECK OPTION constraints from parent views.  We are
	 * required to do this after testing all constraints and uniqueness
	 * violations per the SQL spec, so we do it after actually updating the
	 * record in the heap and all indexes.
	 *
	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
	 * are looking for at this point.
	 */
	if (resultRelInfo->ri_WithCheckOptions != NIL)
		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
							 slot, context->estate);
}
2155 :
2156 : /*
2157 : * Queues up an update event using the target root partitioned table's
2158 : * trigger to check that a cross-partition update hasn't broken any foreign
385 alvherre 2159 ECB : * keys pointing into it.
2160 : */
static void
ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
								   ResultRelInfo *sourcePartInfo,
								   ResultRelInfo *destPartInfo,
								   ItemPointer tupleid,
								   TupleTableSlot *oldslot,
								   TupleTableSlot *newslot)
{
	ListCell   *lc;
	ResultRelInfo *rootRelInfo;
	List	   *ancestorRels;

	rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
	ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);

	/*
	 * For any foreign keys that point directly into a non-root ancestors of
	 * the source partition, we can in theory fire an update event to enforce
	 * those constraints using their triggers, if we could tell that both the
	 * source and the destination partitions are under the same ancestor. But
	 * for now, we simply report an error that those cannot be enforced.
	 */
	foreach(lc, ancestorRels)
	{
		ResultRelInfo *rInfo = lfirst(lc);
		TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
		bool		has_noncloned_fkey = false;

		/* Root ancestor's triggers will be processed. */
		if (rInfo == rootRelInfo)
			continue;

		/*
		 * Scan this non-root ancestor's AFTER ROW UPDATE triggers for a
		 * non-cloned RI trigger on the PK side, which would indicate a
		 * foreign key defined directly on this ancestor.
		 */
		if (trigdesc && trigdesc->trig_update_after_row)
		{
			for (int i = 0; i < trigdesc->numtriggers; i++)
			{
				Trigger    *trig = &trigdesc->triggers[i];

				if (!trig->tgisclone &&
					RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
				{
					has_noncloned_fkey = true;
					break;
				}
			}
		}

		if (has_noncloned_fkey)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
					 errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
							   RelationGetRelationName(rInfo->ri_RelationDesc),
							   RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
					 errhint("Consider defining the foreign key on table \"%s\".",
							 RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
	}

	/*
	 * Perform the root table's triggers.  Note that oldslot is not passed
	 * here; the old row is identified by tupleid within the source
	 * partition.
	 */
	ExecARUpdateTriggers(context->estate,
						 rootRelInfo, sourcePartInfo, destPartInfo,
						 tupleid, NULL, newslot, NIL, NULL, true);
}
2224 :
2225 : /* ----------------------------------------------------------------
2226 : * ExecUpdate
4929 tgl 2227 ECB : *
2228 : * note: we can't run UPDATE queries with transactions
2229 : * off because UPDATEs are actually INSERTs and our
2230 : * scan will mistakenly loop forever, updating the tuple
2231 : * it just inserted.. This should be fixed but until it
2232 : * is, we don't want to get stuck in an infinite loop
2233 : * which corrupts your database..
2234 : *
2235 : * When updating a table, tupleid identifies the tuple to
2236 : * update and oldtuple is NULL. When updating a view, oldtuple
2237 : * is passed to the INSTEAD OF triggers and identifies what to
2238 : * update, and tupleid is invalid. When updating a foreign table,
3304 noah 2239 : * tupleid is invalid; the FDW has to figure out which row to
2240 : * update using data from the planSlot. oldtuple is passed to
2241 : * foreign table triggers; it is NULL when the foreign table has
2242 : * no relevant triggers.
2243 : *
2244 : * slot contains the new tuple value to be stored.
2245 : * planSlot is the output of the ModifyTable's subplan; we use it
2246 : * to access values from other input tables (for RETURNING),
2247 : * row-ID junk columns, etc.
2248 : *
2249 : * Returns RETURNING result if any, otherwise NULL.
2250 : * ----------------------------------------------------------------
2251 : */
2252 : static TupleTableSlot *
388 alvherre 2253 GIC 185823 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2254 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2255 : bool canSetTag)
2256 : {
	/*
	 * NOTE(review): "tupleid" locates the target row for the plain-table
	 * and FDW paths below, while "oldtuple" carries the row for the
	 * INSTEAD OF ROW trigger (view) path -- confirm the exact contract
	 * against the (not visible here) header comment and callers.
	 */
2257 185823 : EState *estate = context->estate;
907 heikki.linnakangas 2258 185823 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
388 alvherre 2259 185823 : UpdateContext updateCxt = {0};
2260 : TM_Result result;
2261 :
2262 : /*
2263 : * abort the operation if not running transactions
2264 : */
4929 tgl 2265 185823 : if (IsBootstrapProcessingMode())
4929 tgl 2266 UIC 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2267 :
2268 : /*
2269 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2270 : * done if it says we are.
2271 : */
27 dean.a.rasheed 2272 CBC 185823 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
388 alvherre 2273 GIC 69 : return NULL;
2274 :
2275 : /* INSTEAD OF ROW UPDATE Triggers */
4564 tgl 2276 CBC 185736 : if (resultRelInfo->ri_TrigDesc &&
2277 2686 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
4929 tgl 2278 ECB : {
1503 andres 2279 GIC 57 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2280 : oldtuple, slot))
1418 tgl 2281 9 : return NULL; /* "do nothing" */
2282 : }
2283 185679 : else if (resultRelInfo->ri_FdwRoutine)
3682 tgl 2284 ECB : {
34 tgl 2285 EUB : /* Fill in GENERATEd columns */
388 alvherre 2286 GIC 71 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2287 :
2288 : /*
2289 : * update in foreign table: let the FDW do it
2290 : */
3682 tgl 2291 CBC 71 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
3682 tgl 2292 ECB : resultRelInfo,
2293 : slot,
2294 : context->planSlot);
2295 :
3682 tgl 2296 CBC 71 : if (slot == NULL) /* "do nothing" */
3682 tgl 2297 GIC 1 : return NULL;
3682 tgl 2298 ECB :
2299 :
2300 : /*
2621 rhaas 2300 : * AFTER ROW Triggers or RETURNING expressions might reference the
2301 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
688 tgl 2302 : * them. (This covers the case where the FDW replaced the slot.)
2303 : */
1503 andres 2304 GIC 70 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
3682 tgl 2305 ECB : }
2306 : else
2307 : {
2308 : /*
2309 : * If we generate a new candidate tuple after EvalPlanQual testing, we
34 2310 : * must loop back here to try again. (We don't need to redo triggers,
2311 : * however. If there are any BEFORE triggers then trigger.c will have
2312 : * done table_tuple_lock to lock the correct tuple, so there's no need
2313 : * to do them again.)
2314 : */
388 alvherre 2315 CBC 185608 : redo_act:
2316 185654 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2317 : canSetTag, &updateCxt);
2318 :
2319 : /*
2320 : * If ExecUpdateAct reports that a cross-partition update was done,
2321 : * then the RETURNING tuple (if any) has been projected and there's
2322 : * nothing else for us to do.
2907 sfrost 2323 ECB : */
388 alvherre 2324 GIC 185511 : if (updateCxt.crossPartUpdate)
2325 327 : return context->cpUpdateReturningSlot;
2326 :
4564 tgl 2327 185184 : switch (result)
2328 : {
1478 andres 2329 42 : case TM_SelfModified:
2330 :
2331 : /*
2332 : * The target tuple was already updated or deleted by the
2333 : * current command, or by a later command in the current
3817 kgrittn 2334 ECB : * transaction. The former case is possible in a join UPDATE
3602 bruce 2335 : * where multiple tuples join to the same target tuple. This
2336 : * is pretty questionable, but Postgres has always allowed it:
2337 : * we just execute the first update action and ignore
2338 : * additional update attempts.
2339 : *
2340 : * The latter case arises if the tuple is modified by a
2341 : * command in a BEFORE trigger, or perhaps by a command in a
2342 : * volatile function used in the query. In such situations we
3817 kgrittn 2343 : * should not ignore the update, but it is equally unsafe to
2344 : * proceed. We don't want to discard the original UPDATE
2345 : * while keeping the triggered actions based on it; and we
2346 : * have no principled way to merge this update with the
2347 : * previous ones. So throwing an error is the only safe
2348 : * course.
2349 : *
2350 : * If a trigger actually intends this type of interaction, it
2351 : * can re-execute the UPDATE (assuming it can figure out how)
2352 : * and then return NULL to cancel the outer update.
2353 : */
388 alvherre 2354 GIC 42 : if (context->tmfd.cmax != estate->es_output_cid)
3817 kgrittn 2355 3 : ereport(ERROR,
2356 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2357 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2358 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2359 :
2360 : /* Else, already updated by self; nothing to do */
4564 tgl 2361 39 : return NULL;
2362 :
1478 andres 2363 185082 : case TM_Ok:
4564 tgl 2364 185082 : break;
2365 :
1478 andres 2366 56 : case TM_Updated:
2367 : {
2368 : TupleTableSlot *inputslot;
2369 : TupleTableSlot *epqslot;
2370 : TupleTableSlot *oldSlot;
2371 :
2372 56 : if (IsolationUsesXactSnapshot())
1478 andres 2373 CBC 1 : ereport(ERROR,
1478 andres 2374 ECB : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2375 : errmsg("could not serialize access due to concurrent update")));
2376 :
2377 : /*
2378 : * Already know that we're going to need to do EPQ, so
2379 : * fetch tuple directly into the right slot.
2380 : */
388 alvherre 2381 GIC 55 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1478 andres 2382 ECB : resultRelInfo->ri_RangeTableIndex);
2383 :
6 akorotkov 2384 GIC 55 : result = table_tuple_lock(resultRelationDesc, tupleid,
6 akorotkov 2385 ECB : estate->es_snapshot,
2386 : inputslot, estate->es_output_cid,
2387 : updateCxt.lockmode, LockWaitBlock,
2388 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2389 : &context->tmfd);
2390 :
6 akorotkov 2391 CBC 53 : switch (result)
6 akorotkov 2392 ECB : {
6 akorotkov 2393 GIC 49 : case TM_Ok:
2394 49 : Assert(context->tmfd.traversed);
2395 :
2396 49 : epqslot = EvalPlanQual(context->epqstate,
2397 : resultRelationDesc,
2398 : resultRelInfo->ri_RangeTableIndex,
2399 : inputslot);
6 akorotkov 2400 CBC 49 : if (TupIsNull(epqslot))
2401 : /* Tuple not passing quals anymore, exiting... */
6 akorotkov 2402 GIC 3 : return NULL;
6 akorotkov 2403 ECB :
2404 : /* Make sure ri_oldTupleSlot is initialized. */
6 akorotkov 2405 GIC 46 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
6 akorotkov 2406 UIC 0 : ExecInitUpdateProjection(context->mtstate,
2407 : resultRelInfo);
2408 :
2409 : /* Fetch the most recent version of old tuple. */
6 akorotkov 2410 CBC 46 : oldSlot = resultRelInfo->ri_oldTupleSlot;
6 akorotkov 2411 GIC 46 : if (!table_tuple_fetch_row_version(resultRelationDesc,
6 akorotkov 2412 ECB : tupleid,
2413 : SnapshotAny,
2414 : oldSlot))
6 akorotkov 2415 LBC 0 : elog(ERROR, "failed to fetch tuple being updated");
6 akorotkov 2416 GIC 46 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2417 : epqslot, oldSlot);
2418 46 : goto redo_act;
6 akorotkov 2419 ECB :
6 akorotkov 2420 GIC 1 : case TM_Deleted:
6 akorotkov 2421 ECB : /* tuple already deleted; nothing to do */
6 akorotkov 2422 GIC 1 : return NULL;
2423 :
6 akorotkov 2424 CBC 3 : case TM_SelfModified:
6 akorotkov 2425 EUB :
2426 : /*
2427 : * This can be reached when following an update
2428 : * chain from a tuple updated by another session,
2429 : * reaching a tuple that was already updated in
2430 : * this transaction. If previously modified by
2431 : * this command, ignore the redundant update,
2432 : * otherwise error out.
2433 : *
2434 : * See also TM_SelfModified response to
2435 : * table_tuple_update() above.
2436 : */
6 akorotkov 2437 CBC 3 : if (context->tmfd.cmax != estate->es_output_cid)
6 akorotkov 2438 GIC 1 : ereport(ERROR,
6 akorotkov 2439 ECB : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2440 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2441 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
6 akorotkov 2442 GIC 2 : return NULL;
6 akorotkov 2443 ECB :
6 akorotkov 2444 UIC 0 : default:
2445 : /* see table_tuple_lock call in ExecDelete() */
2446 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2447 : result);
2448 : return NULL;
2449 : }
2450 : }
2451 :
2452 : break;
2453 :
1478 andres 2454 GIC 4 : case TM_Deleted:
2455 4 : if (IsolationUsesXactSnapshot())
1478 andres 2456 LBC 0 : ereport(ERROR,
1478 andres 2457 ECB : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2458 : errmsg("could not serialize access due to concurrent delete")));
2459 : /* tuple already deleted; nothing to do */
4564 tgl 2460 GIC 4 : return NULL;
4929 tgl 2461 ECB :
4564 tgl 2462 UIC 0 : default:
1417 andres 2463 UBC 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2464 : result);
4564 tgl 2465 EUB : return NULL;
2466 : }
2467 : }
2468 :
	/* Count the row toward the command's processed-row total. */
4426 tgl 2469 GIC 185197 : if (canSetTag)
2470 184902 : (estate->es_processed)++;
2471 :
388 alvherre 2472 185197 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
27 dean.a.rasheed 2473 ECB : slot);
4929 tgl 2474 :
4929 tgl 2475 EUB : /* Process RETURNING if present */
4929 tgl 2476 GIC 185156 : if (resultRelInfo->ri_projectReturning)
388 alvherre 2477 1060 : return ExecProcessReturning(resultRelInfo, slot, context->planSlot);
2478 :
4929 tgl 2479 CBC 184096 : return NULL;
2480 : }
4929 tgl 2481 EUB :
2893 andres 2482 : /*
2483 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2484 : *
2485 : * Try to lock tuple for update as part of speculative insertion. If
2486 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2487 : * (but still lock row, even though it may not satisfy estate's
2893 andres 2488 ECB : * snapshot).
2489 : *
2490 : * Returns true if we're done (with or without an update), or false if
2491 : * the caller must retry the INSERT from scratch.
2492 : */
2493 : static bool
388 alvherre 2494 GIC 2600 : ExecOnConflictUpdate(ModifyTableContext *context,
2893 andres 2495 ECB : ResultRelInfo *resultRelInfo,
2496 : ItemPointer conflictTid,
2497 : TupleTableSlot *excludedSlot,
2498 : bool canSetTag,
2499 : TupleTableSlot **returning)
2500 : {
388 alvherre 2501 GIC 2600 : ModifyTableState *mtstate = context->mtstate;
2893 andres 2502 2600 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2503 2600 : Relation relation = resultRelInfo->ri_RelationDesc;
1840 alvherre 2504 2600 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
1495 andres 2505 2600 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2506 : TM_FailureData tmfd;
2507 : LockTupleMode lockmode;
2508 : TM_Result test;
2509 : Datum xminDatum;
2510 : TransactionId xmin;
2511 : bool isnull;
2512 :
	/*
	 * NOTE(review): "existing" is the dedicated slot that receives the
	 * conflicting tuple from table_tuple_lock below; every exit path that
	 * reached the lock clears it so resources aren't held across rows.
	 */
2893 andres 2513 ECB : /* Determine lock mode to use */
388 alvherre 2514 GIC 2600 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2515 :
2516 : /*
2517 : * Lock tuple for update. Don't follow updates when tuple cannot be
2518 : * locked without doing so. A row locking conflict here means our
2519 : * previous conclusion that the tuple is conclusively committed is not
2893 andres 2520 ECB : * true anymore.
2521 : */
1417 andres 2522 CBC 2600 : test = table_tuple_lock(relation, conflictTid,
388 alvherre 2523 2600 : context->estate->es_snapshot,
2524 2600 : existing, context->estate->es_output_cid,
2525 : lockmode, LockWaitBlock, 0,
2526 : &tmfd);
2893 andres 2527 GIC 2600 : switch (test)
2528 : {
1478 2529 2588 : case TM_Ok:
2530 : /* success! */
2893 2531 2588 : break;
2532 :
1478 andres 2533 CBC 12 : case TM_Invisible:
2534 :
2535 : /*
2536 : * This can occur when a just inserted tuple is updated again in
2537 : * the same command. E.g. because multiple rows with the same
2538 : * conflicting key values are inserted.
2539 : *
2540 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
1478 andres 2541 ECB : * case. We do not want to proceed because it would lead to the
2542 : * same row being updated a second time in some unspecified order,
2543 : * and in contrast to plain UPDATEs there's no historical behavior
2544 : * to break.
2545 : *
2893 2546 : * It is the user's responsibility to prevent this situation from
2547 : * occurring. These problems are why the SQL standard similarly
377 alvherre 2548 : * specifies that for SQL MERGE, an exception must be raised in
2549 : * the event of an attempt to update the same row twice.
2893 andres 2550 : */
1478 andres 2551 GIC 12 : xminDatum = slot_getsysattr(existing,
1478 andres 2552 ECB : MinTransactionIdAttributeNumber,
2553 : &isnull);
1478 andres 2554 GIC 12 : Assert(!isnull);
2555 12 : xmin = DatumGetTransactionId(xminDatum);
2556 :
2557 12 : if (TransactionIdIsCurrentTransactionId(xmin))
2893 2558 12 : ereport(ERROR,
2559 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2560 : /* translator: %s is a SQL command name */
2561 : errmsg("%s command cannot affect row a second time",
2562 : "ON CONFLICT DO UPDATE"),
2563 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2564 :
2565 : /* This shouldn't happen */
2893 andres 2566 UIC 0 : elog(ERROR, "attempted to lock invisible tuple");
2567 : break;
2568 :
1478 2569 0 : case TM_SelfModified:
2893 andres 2570 ECB :
2571 : /*
2572 : * This state should never be reached. As a dirty snapshot is used
2573 : * to find conflicting tuples, speculative insertion wouldn't have
2574 : * seen this row to conflict with.
2575 : */
2893 andres 2576 LBC 0 : elog(ERROR, "unexpected self-updated tuple");
1804 tgl 2577 ECB : break;
2578 :
1478 andres 2579 UIC 0 : case TM_Updated:
2893 2580 0 : if (IsolationUsesXactSnapshot())
2581 0 : ereport(ERROR,
2582 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2583 : errmsg("could not serialize access due to concurrent update")));
2584 :
1828 andres 2585 EUB : /*
2586 : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2587 : * a partitioned table we shouldn't reach to a case where tuple to
2588 : * be lock is moved to another partition due to concurrent update
2589 : * of the partition key.
2590 : */
1478 andres 2591 UIC 0 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2592 :
2593 : /*
2594 : * Tell caller to try again from the very start.
2893 andres 2595 EUB : *
2596 : * It does not make sense to use the usual EvalPlanQual() style
2597 : * loop here, as the new version of the row might not conflict
2598 : * anymore, or the conflicting tuple has actually been deleted.
2599 : */
1478 andres 2600 UBC 0 : ExecClearTuple(existing);
1478 andres 2601 UIC 0 : return false;
2602 :
2603 0 : case TM_Deleted:
2604 0 : if (IsolationUsesXactSnapshot())
2605 0 : ereport(ERROR,
2606 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2607 : errmsg("could not serialize access due to concurrent delete")));
2608 :
2609 : /* see TM_Updated case */
1478 andres 2610 UBC 0 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
1478 andres 2611 UIC 0 : ExecClearTuple(existing);
2893 2612 0 : return false;
2613 :
2614 0 : default:
1417 2615 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2616 : }
2617 :
2618 : /* Success, the tuple is locked. */
2893 andres 2619 EUB :
2620 : /*
2621 : * Verify that the tuple is visible to our MVCC snapshot if the current
2622 : * isolation level mandates that.
2623 : *
2624 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2625 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2626 : *
2627 : * This means we only ever continue when a new command in the current
2628 : * transaction could see the row, even though in READ COMMITTED mode the
2629 : * tuple will not be visible according to the current statement's
2630 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2631 : * versions.
2632 : */
388 alvherre 2633 GBC 2588 : ExecCheckTupleVisible(context->estate, relation, existing);
2893 andres 2634 EUB :
2635 : /*
2636 : * Make tuple and any needed join variables available to ExecQual and
2637 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2638 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2639 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2640 : * other redirection.
2641 : */
1495 andres 2642 GIC 2588 : econtext->ecxt_scantuple = existing;
2893 2643 2588 : econtext->ecxt_innertuple = excludedSlot;
2644 2588 : econtext->ecxt_outertuple = NULL;
2645 :
2217 2646 2588 : if (!ExecQual(onConflictSetWhere, econtext))
2647 : {
1495 2648 16 : ExecClearTuple(existing); /* see return below */
2893 2649 16 : InstrCountFiltered1(&mtstate->ps, 1);
2650 16 : return true; /* done with the tuple */
2651 : }
2893 andres 2652 ECB :
2893 andres 2653 GIC 2572 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2654 : {
2655 : /*
2656 : * Check target's existing tuple against UPDATE-applicable USING
2657 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2658 : *
2659 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2660 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2893 andres 2661 ECB : * but that's almost the extent of its special handling for ON
2662 : * CONFLICT DO UPDATE.
2663 : *
2664 : * The rewriter will also have associated UPDATE applicable straight
2665 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2666 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2667 : * kinds, so there is no danger of spurious over-enforcement in the
2668 : * INSERT or UPDATE path.
2669 : */
2893 andres 2670 GIC 30 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2671 : existing,
2893 andres 2672 ECB : mtstate->ps.state);
2673 : }
2674 :
2675 : /* Project the new tuple version */
1840 alvherre 2676 GIC 2560 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2677 :
2678 : /*
2679 : * Note that it is possible that the target tuple has been modified in
2680 : * this session, after the above table_tuple_lock. We choose to not error
2681 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2682 : * This can happen if an UPDATE is triggered from within ExecQual(),
2683 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2684 : * wCTE in the ON CONFLICT's SET.
2685 : */
2686 :
2687 : /* Execute UPDATE with projection */
388 2688 5105 : *returning = ExecUpdate(context, resultRelInfo,
388 alvherre 2689 ECB : conflictTid, NULL,
1495 andres 2690 GIC 2560 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2691 : canSetTag);
2692 :
2693 : /*
2694 : * Clear out existing tuple, as there might not be another conflict among
1495 andres 2695 ECB : * the next input rows. Don't want to hold resources till the end of the
2696 : * query.
2697 : */
1495 andres 2698 GIC 2545 : ExecClearTuple(existing);
2893 2699 2545 : return true;
2700 : }
2701 :
2702 : /*
2703 : * Perform MERGE.
 *
 * Invoked once per row produced by the source/target join.  "tupleid"
 * identifies the matched target row, or is NULL when the source row had
 * no match; correspondingly we run the WHEN MATCHED actions (via
 * ExecMergeMatched) and/or the WHEN NOT MATCHED actions (via
 * ExecMergeNotMatched).
 *
 * Always returns NULL, since MERGE does not support RETURNING.
2704 : */
2705 : static TupleTableSlot *
377 alvherre 2706 2521 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2707 : ItemPointer tupleid, bool canSetTag)
2708 : {
2709 : bool matched;
2710 :
2711 : /*-----
2712 : * If we are dealing with a WHEN MATCHED case (tupleid is valid), we
2713 : * execute the first action for which the additional WHEN MATCHED AND
2714 : * quals pass. If an action without quals is found, that action is
2715 : * executed.
2716 : *
2717 : * Similarly, if we are dealing with WHEN NOT MATCHED case, we look at
2718 : * the given WHEN NOT MATCHED actions in sequence until one passes.
2719 : *
2720 : * Things get interesting in case of concurrent update/delete of the
2721 : * target tuple. Such concurrent update/delete is detected while we are
2722 : * executing a WHEN MATCHED action.
2723 : *
2724 : * A concurrent update can:
2725 : *
2726 : * 1. modify the target tuple so that it no longer satisfies the
2727 : * additional quals attached to the current WHEN MATCHED action
2728 : *
2729 : * In this case, we are still dealing with a WHEN MATCHED case.
2730 : * We recheck the list of WHEN MATCHED actions from the start and
2731 : * choose the first one that satisfies the new target tuple.
2732 : *
2733 : * 2. modify the target tuple so that the join quals no longer pass and
2734 : * hence the source tuple no longer has a match.
2735 : *
2736 : * In this case, the source tuple no longer matches the target tuple,
2737 : * so we now instead find a qualifying WHEN NOT MATCHED action to
2738 : * execute.
2739 : *
2740 : * XXX Hmmm, what if the updated tuple would now match one that was
2741 : * considered NOT MATCHED so far?
2742 : *
2743 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED.
2744 : *
2745 : * ExecMergeMatched takes care of following the update chain and
2746 : * re-finding the qualifying WHEN MATCHED action, as long as the updated
2747 : * target tuple still satisfies the join quals, i.e., it remains a WHEN
2748 : * MATCHED case. If the tuple gets deleted or the join quals fail, it
2749 : * returns and we try ExecMergeNotMatched. Given that ExecMergeMatched
2750 : * always make progress by following the update chain and we never switch
2751 : * from ExecMergeNotMatched to ExecMergeMatched, there is no risk of a
2752 : * livelock.
2753 : */
377 alvherre 2754 GIC 2521 : matched = tupleid != NULL;
2755 2521 : if (matched)
2756 1493 : matched = ExecMergeMatched(context, resultRelInfo, tupleid, canSetTag);
2757 :
2758 : /*
2759 : * Either we were dealing with a NOT MATCHED tuple or ExecMergeMatched()
2760 : * returned "false", indicating the previously MATCHED tuple no longer
2761 : * matches.
2762 : */
2763 2496 : if (!matched)
2764 1036 : ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2765 :
2766 : /* No RETURNING support yet */
2767 2481 : return NULL;
2768 : }
2769 :
2770 : /*
2771 : * Check and execute the first qualifying MATCHED action. The current target
2772 : * tuple is identified by tupleid.
377 alvherre 2773 ECB : *
2774 : * We start from the first WHEN MATCHED action and check if the WHEN quals
2775 : * pass, if any. If the WHEN quals for the first action do not pass, we
2776 : * check the second, then the third and so on. If we reach to the end, no
2777 : * action is taken and we return true, indicating that no further action is
2778 : * required for this tuple.
2779 : *
2780 : * If we do find a qualifying action, then we attempt to execute the action.
2781 : *
2782 : * If the tuple is concurrently updated, EvalPlanQual is run with the updated
2783 : * tuple to recheck the join quals. Note that the additional quals associated
2784 : * with individual actions are evaluated by this routine via ExecQual, while
2785 : * EvalPlanQual checks for the join quals. If EvalPlanQual tells us that the
2786 : * updated tuple still passes the join quals, then we restart from the first
2787 : * action to look for a qualifying action. Otherwise, we return false --
2788 : * meaning that a NOT MATCHED action must now be executed for the current
2789 : * source tuple.
2790 : */
2791 : static bool
377 alvherre 2792 GIC 1493 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2793 : ItemPointer tupleid, bool canSetTag)
2794 : {
2795 1493 : ModifyTableState *mtstate = context->mtstate;
2796 : TupleTableSlot *newslot;
2797 1493 : EState *estate = context->estate;
2798 1493 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2799 : bool isNull;
2800 1493 : EPQState *epqstate = &mtstate->mt_epqstate;
2801 : ListCell *l;
2802 :
2803 : /*
2804 : * If there are no WHEN MATCHED actions, we are done.
2805 : */
2806 1493 : if (resultRelInfo->ri_matchedMergeAction == NIL)
2807 264 : return true;
2808 :
2809 : /*
2810 : * Make tuple and any needed join variables available to ExecQual and
377 alvherre 2811 ECB : * ExecProject. The target's existing tuple is installed in the scantuple.
2812 : * Again, this target relation's slot is required only in the case of a
2813 : * MATCHED tuple and UPDATE/DELETE actions.
2814 : */
377 alvherre 2815 GIC 1229 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
377 alvherre 2816 CBC 1229 : econtext->ecxt_innertuple = context->planSlot;
2817 1229 : econtext->ecxt_outertuple = NULL;
2818 :
181 john.naylor 2819 GNC 1247 : lmerge_matched:
2820 :
2821 : /*
2822 : * This routine is only invoked for matched rows, and we must have found
2823 : * the tupleid of the target row in that case; fetch that tuple.
2824 : *
377 alvherre 2825 ECB : * We use SnapshotAny for this because we might get called again after
2826 : * EvalPlanQual returns us a new tuple, which may not be visible to our
2827 : * MVCC snapshot.
2828 : */
2829 :
377 alvherre 2830 GIC 1247 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2831 : tupleid,
2832 : SnapshotAny,
2833 : resultRelInfo->ri_oldTupleSlot))
377 alvherre 2834 LBC 0 : elog(ERROR, "failed to fetch the target tuple");
377 alvherre 2835 ECB :
377 alvherre 2836 CBC 1938 : foreach(l, resultRelInfo->ri_matchedMergeAction)
2837 : {
2838 1566 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
377 alvherre 2839 GIC 1566 : CmdType commandType = relaction->mas_action->commandType;
2840 : TM_Result result;
2841 1566 : UpdateContext updateCxt = {0};
2842 :
2843 : /*
2844 : * Test condition, if any.
2845 : *
2846 : * In the absence of any condition, we perform the action
2847 : * unconditionally (no need to check separately since ExecQual() will
2848 : * return true if there are no conditions to evaluate).
377 alvherre 2849 ECB : */
377 alvherre 2850 GIC 1566 : if (!ExecQual(relaction->mas_whenqual, econtext))
2851 691 : continue;
2852 :
377 alvherre 2853 EUB : /*
2854 : * Check if the existing target tuple meets the USING checks of
377 alvherre 2855 ECB : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
2856 : * error.
2857 : *
2858 : * The WITH CHECK quals are applied in ExecUpdate() and hence we need
2859 : * not do anything special to handle them.
2860 : *
2861 : * NOTE: We must do this after WHEN quals are evaluated, so that we
2862 : * check policies only when they matter.
2863 : */
377 alvherre 2864 GIC 875 : if (resultRelInfo->ri_WithCheckOptions)
2865 : {
2866 30 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
2867 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
2868 : resultRelInfo,
377 alvherre 2869 ECB : resultRelInfo->ri_oldTupleSlot,
377 alvherre 2870 CBC 30 : context->mtstate->ps.state);
2871 : }
2872 :
2873 : /* Perform stated action */
377 alvherre 2874 GIC 863 : switch (commandType)
2875 : {
2876 726 : case CMD_UPDATE:
2877 :
2878 : /*
2879 : * Project the output tuple, and use that to update the table.
2880 : * We don't need to filter out junk attributes, because the
2881 : * UPDATE action's targetlist doesn't have any.
2882 : */
377 alvherre 2883 CBC 726 : newslot = ExecProject(relaction->mas_proj);
2884 :
				/*
				 * NOTE(review): context->relaction is presumably consumed by
				 * the ExecUpdatePrologue/ExecUpdateAct subroutines (and the
				 * ExecDelete* ones below) -- confirm against their code.
				 */
2885 726 : context->relaction = relaction;
377 alvherre 2886 GIC 726 : if (!ExecUpdatePrologue(context, resultRelInfo,
2887 : tupleid, NULL, newslot, &result))
2888 : {
27 dean.a.rasheed 2889 CBC 9 : if (result == TM_Ok)
27 dean.a.rasheed 2890 GIC 14 : return true; /* "do nothing" */
2891 6 : break; /* concurrent update/delete */
2892 : }
377 alvherre 2893 CBC 717 : result = ExecUpdateAct(context, resultRelInfo, tupleid, NULL,
2894 : newslot, false, &updateCxt);
2895 710 : if (result == TM_Ok && updateCxt.updated)
2896 : {
377 alvherre 2897 GIC 693 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
2898 : tupleid, NULL, newslot);
2899 693 : mtstate->mt_merge_updated += 1;
2900 : }
2901 710 : break;
377 alvherre 2902 ECB :
377 alvherre 2903 GIC 131 : case CMD_DELETE:
377 alvherre 2904 CBC 131 : context->relaction = relaction;
2905 131 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
2906 : NULL, NULL, &result))
2907 : {
27 dean.a.rasheed 2908 6 : if (result == TM_Ok)
2909 3 : return true; /* "do nothing" */
2910 3 : break; /* concurrent update/delete */
2911 : }
6 akorotkov 2912 125 : result = ExecDeleteAct(context, resultRelInfo, tupleid, false);
377 alvherre 2913 GIC 125 : if (result == TM_Ok)
377 alvherre 2914 ECB : {
377 alvherre 2915 GIC 119 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
377 alvherre 2916 ECB : false);
377 alvherre 2917 GIC 119 : mtstate->mt_merge_deleted += 1;
377 alvherre 2918 ECB : }
377 alvherre 2919 GIC 125 : break;
377 alvherre 2920 ECB :
377 alvherre 2921 GIC 6 : case CMD_NOTHING:
377 alvherre 2922 ECB : /* Doing nothing is always OK */
377 alvherre 2923 CBC 6 : result = TM_Ok;
2924 6 : break;
2925 :
377 alvherre 2926 UIC 0 : default:
377 alvherre 2927 LBC 0 : elog(ERROR, "unknown action in MERGE WHEN MATCHED clause");
377 alvherre 2928 ECB : }
2929 :
377 alvherre 2930 GIC 850 : switch (result)
377 alvherre 2931 ECB : {
377 alvherre 2932 CBC 818 : case TM_Ok:
2933 : /* all good; perform final actions */
143 2934 818 : if (canSetTag && commandType != CMD_NOTHING)
377 alvherre 2935 GIC 812 : (estate->es_processed)++;
377 alvherre 2936 ECB :
377 alvherre 2937 GIC 818 : break;
377 alvherre 2938 ECB :
377 alvherre 2939 GIC 6 : case TM_SelfModified:
377 alvherre 2940 ECB :
2941 : /*
2942 : * The SQL standard disallows this for MERGE.
2943 : */
377 alvherre 2944 GIC 6 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
377 alvherre 2945 GBC 6 : ereport(ERROR,
377 alvherre 2946 EUB : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2947 : /* translator: %s is a SQL command name */
2948 : errmsg("%s command cannot affect row a second time",
377 alvherre 2949 ECB : "MERGE"),
2950 : errhint("Ensure that not more than one source row matches any one target row.")));
2951 : /* This shouldn't happen */
377 alvherre 2952 UIC 0 : elog(ERROR, "attempted to update or delete invisible tuple");
377 alvherre 2953 ECB : break;
2954 :
377 alvherre 2955 GIC 4 : case TM_Deleted:
377 alvherre 2956 CBC 4 : if (IsolationUsesXactSnapshot())
377 alvherre 2957 UIC 0 : ereport(ERROR,
377 alvherre 2958 ECB : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2959 : errmsg("could not serialize access due to concurrent delete")));
2960 :
2961 : /*
2962 : * If the tuple was already deleted, return to let caller
2963 : * handle it under NOT MATCHED clauses.
2964 : */
377 alvherre 2965 GIC 4 : return false;
2966 :
2967 22 : case TM_Updated:
2968 : {
2969 : Relation resultRelationDesc;
2970 : TupleTableSlot *epqslot,
377 alvherre 2971 EUB : *inputslot;
2972 : LockTupleMode lockmode;
2973 :
377 alvherre 2974 ECB : /*
2975 : * The target tuple was concurrently updated by some other
27 dean.a.rasheed 2976 EUB : * transaction. Run EvalPlanQual() with the new version of
2977 : * the tuple. If it does not return a tuple, then we
2978 : * switch to the NOT MATCHED list of actions. If it does
2979 : * return a tuple and the join qual is still satisfied,
2980 : * then we just need to recheck the MATCHED actions,
2981 : * starting from the top, and execute the first qualifying
2982 : * action.
2983 : */
377 alvherre 2984 CBC 22 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
377 alvherre 2985 GIC 22 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
377 alvherre 2986 ECB :
377 alvherre 2987 GIC 22 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
2988 : resultRelInfo->ri_RangeTableIndex);
2989 :
2990 22 : result = table_tuple_lock(resultRelationDesc, tupleid,
2991 : estate->es_snapshot,
2992 : inputslot, estate->es_output_cid,
2993 : lockmode, LockWaitBlock,
2994 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2995 : &context->tmfd);
2996 22 : switch (result)
2997 : {
2998 21 : case TM_Ok:
2999 21 : epqslot = EvalPlanQual(epqstate,
3000 : resultRelationDesc,
3001 : resultRelInfo->ri_RangeTableIndex,
3002 : inputslot);
377 alvherre 3003 ECB :
3004 : /*
3005 : * If we got no tuple, or the tuple we get has a
3006 : * NULL ctid, go back to caller: this one is not a
3007 : * MATCHED tuple anymore, so they can retry with
3008 : * NOT MATCHED actions.
3009 : */
377 alvherre 3010 GIC 21 : if (TupIsNull(epqslot))
377 alvherre 3011 UIC 0 : return false;
3012 :
377 alvherre 3013 GIC 21 : (void) ExecGetJunkAttribute(epqslot,
3014 21 : resultRelInfo->ri_RowIdAttNo,
377 alvherre 3015 ECB : &isNull);
377 alvherre 3016 GIC 21 : if (isNull)
377 alvherre 3017 CBC 3 : return false;
377 alvherre 3018 ECB :
3019 : /*
3020 : * When a tuple was updated and migrated to
3021 : * another partition concurrently, the current
3022 : * MERGE implementation can't follow. There's
3023 : * probably a better way to handle this case, but
3024 : * it'd require recognizing the relation to which
3025 : * the tuple moved, and setting our current
3026 : * resultRelInfo to that.
3027 : */
377 alvherre 3028 GIC 18 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
377 alvherre 3029 LBC 0 : ereport(ERROR,
377 alvherre 3030 EUB : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3031 : errmsg("tuple to be deleted was already moved to another partition due to concurrent update")));
377 alvherre 3032 ECB :
3033 : /*
3034 : * A non-NULL ctid means that we are still dealing
3035 : * with MATCHED case. Restart the loop so that we
3036 : * apply all the MATCHED rules again, to ensure
3037 : * that the first qualifying WHEN MATCHED action
3038 : * is executed.
3039 : *
3040 : * Update tupleid to that of the new tuple, for
3041 : * the refetch we do at the top.
3042 : */
377 alvherre 3043 GIC 18 : ItemPointerCopy(&context->tmfd.ctid, tupleid);
3044 18 : goto lmerge_matched;
3045 :
3046 1 : case TM_Deleted:
377 alvherre 3047 ECB :
377 alvherre 3048 EUB : /*
3049 : * tuple already deleted; tell caller to run NOT
3050 : * MATCHED actions
3051 : */
377 alvherre 3052 GIC 1 : return false;
3053 :
377 alvherre 3054 UIC 0 : case TM_SelfModified:
3055 :
3056 : /*
3057 : * This can be reached when following an update
3058 : * chain from a tuple updated by another session,
3059 : * reaching a tuple that was already updated in
3060 : * this transaction. If previously modified by
3061 : * this command, ignore the redundant update,
3062 : * otherwise error out.
377 alvherre 3063 ECB : *
3064 : * See also response to TM_SelfModified in
3065 : * ExecUpdate().
3066 : */
377 alvherre 3067 UIC 0 : if (context->tmfd.cmax != estate->es_output_cid)
3068 0 : ereport(ERROR,
3069 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3070 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
377 alvherre 3071 ECB : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
377 alvherre 3072 UIC 0 : return false;
377 alvherre 3073 EUB :
377 alvherre 3074 UIC 0 : default:
3075 : /* see table_tuple_lock call in ExecDelete() */
3076 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3077 : result);
3078 : return false;
3079 : }
3080 : }
3081 :
3082 0 : case TM_Invisible:
3083 : case TM_WouldBlock:
3084 : case TM_BeingModified:
3085 : /* these should not occur */
377 alvherre 3086 UBC 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
377 alvherre 3087 EUB : break;
3088 : }
3089 :
3090 : /*
3091 : * We've activated one of the WHEN clauses, so we don't search
3092 : * further. This is required behaviour, not an optimization.
3093 : */
377 alvherre 3094 GIC 818 : break;
377 alvherre 3095 EUB : }
3096 :
3097 : /*
3098 : * Successfully executed an action or no qualifying action was found.
3099 : */
377 alvherre 3100 GIC 1190 : return true;
377 alvherre 3101 EUB : }
3102 :
3103 : /*
3104 : * Execute the first qualifying NOT MATCHED action.
3105 : */
3106 : static void
377 alvherre 3107 GIC 1036 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3108 : bool canSetTag)
3109 : {
3110 1036 : ModifyTableState *mtstate = context->mtstate;
3111 1036 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3112 1036 : List *actionStates = NIL;
377 alvherre 3113 ECB : ListCell *l;
3114 :
3115 : /*
3116 : * For INSERT actions, the root relation's merge action is OK since the
3117 : * INSERT's targetlist and the WHEN conditions can only refer to the
3118 : * source relation and hence it does not matter which result relation we
3119 : * work with.
3120 : *
3121 : * XXX does this mean that we can avoid creating copies of actionStates on
3122 : * partitioned tables, for not-matched actions?
3123 : */
377 alvherre 3124 GIC 1036 : actionStates = resultRelInfo->ri_notMatchedMergeAction;
3125 :
377 alvherre 3126 ECB : /*
3127 : * Make source tuple available to ExecQual and ExecProject. We don't need
3128 : * the target tuple, since the WHEN quals and targetlist can't refer to
3129 : * the target columns.
3130 : */
377 alvherre 3131 CBC 1036 : econtext->ecxt_scantuple = NULL;
377 alvherre 3132 GIC 1036 : econtext->ecxt_innertuple = context->planSlot;
3133 1036 : econtext->ecxt_outertuple = NULL;
3134 :
3135 1318 : foreach(l, actionStates)
3136 : {
3137 1036 : MergeActionState *action = (MergeActionState *) lfirst(l);
3138 1036 : CmdType commandType = action->mas_action->commandType;
3139 : TupleTableSlot *newslot;
3140 :
3141 : /*
3142 : * Test condition, if any.
377 alvherre 3143 ECB : *
3144 : * In the absence of any condition, we perform the action
3145 : * unconditionally (no need to check separately since ExecQual() will
3146 : * return true if there are no conditions to evaluate).
3147 : */
377 alvherre 3148 GIC 1036 : if (!ExecQual(action->mas_whenqual, econtext))
3149 282 : continue;
377 alvherre 3150 ECB :
3151 : /* Perform stated action */
377 alvherre 3152 CBC 754 : switch (commandType)
3153 : {
3154 754 : case CMD_INSERT:
3155 :
377 alvherre 3156 ECB : /*
3157 : * Project the tuple. In case of a partitioned table, the
3158 : * projection was already built to use the root's descriptor,
3159 : * so we don't need to map the tuple here.
3160 : */
377 alvherre 3161 GIC 754 : newslot = ExecProject(action->mas_proj);
3162 754 : context->relaction = action;
3163 :
3164 754 : (void) ExecInsert(context, mtstate->rootResultRelInfo, newslot,
3165 : canSetTag, NULL, NULL);
3166 739 : mtstate->mt_merge_inserted += 1;
377 alvherre 3167 CBC 739 : break;
377 alvherre 3168 LBC 0 : case CMD_NOTHING:
3169 : /* Do nothing */
377 alvherre 3170 UIC 0 : break;
377 alvherre 3171 LBC 0 : default:
377 alvherre 3172 UIC 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
377 alvherre 3173 ECB : }
3174 :
3175 : /*
3176 : * We've activated one of the WHEN clauses, so we don't search
3177 : * further. This is required behaviour, not an optimization.
3178 : */
377 alvherre 3179 GIC 739 : break;
377 alvherre 3180 ECB : }
377 alvherre 3181 CBC 1021 : }
3182 :
/*
 * Initialize state for execution of MERGE.
 *
 * Builds per-result-relation MergeActionState lists and the projections
 * each action needs; also sets up partition tuple routing if any INSERT
 * action targets a partitioned table.
 */
void
ExecInitMerge(ModifyTableState *mtstate, EState *estate)
{
	ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
	ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
	ResultRelInfo *resultRelInfo;
	ExprContext *econtext;
	ListCell   *lc;
	int			i;

	/* Nothing to do if the plan carries no MERGE actions at all. */
	if (node->mergeActionLists == NIL)
		return;

	mtstate->mt_merge_subcommands = 0;

	if (mtstate->ps.ps_ExprContext == NULL)
		ExecAssignExprContext(estate, &mtstate->ps);
	econtext = mtstate->ps.ps_ExprContext;

	/*
	 * Create a MergeActionState for each action on the mergeActionList and
	 * add it to either a list of matched actions or not-matched actions.
	 *
	 * Similar logic appears in ExecInitPartitionInfo(), so if changing
	 * anything here, do so there too.
	 */
	i = 0;
	foreach(lc, node->mergeActionLists)
	{
		List	   *mergeActionList = lfirst(lc);
		TupleDesc	relationDesc;
		ListCell   *l;

		/* mergeActionLists is parallel to the resultRelInfo array */
		resultRelInfo = mtstate->resultRelInfo + i;
		i++;
		relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);

		/* initialize slots for MERGE fetches from this rel */
		if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
			ExecInitMergeTupleSlots(mtstate, resultRelInfo);

		foreach(l, mergeActionList)
		{
			MergeAction *action = (MergeAction *) lfirst(l);
			MergeActionState *action_state;
			TupleTableSlot *tgtslot;
			TupleDesc	tgtdesc;
			List	  **list;

			/*
			 * Build action merge state for this rel.  (For partitions,
			 * equivalent code exists in ExecInitPartitionInfo.)
			 */
			action_state = makeNode(MergeActionState);
			action_state->mas_action = action;
			action_state->mas_whenqual = ExecInitQual((List *) action->qual,
													  &mtstate->ps);

			/*
			 * We create two lists - one for WHEN MATCHED actions and one for
			 * WHEN NOT MATCHED actions - and stick the MergeActionState into
			 * the appropriate list.
			 */
			if (action_state->mas_action->matched)
				list = &resultRelInfo->ri_matchedMergeAction;
			else
				list = &resultRelInfo->ri_notMatchedMergeAction;
			*list = lappend(*list, action_state);

			switch (action->commandType)
			{
				case CMD_INSERT:
					ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
										action->targetList);

					/*
					 * If the MERGE targets a partitioned table, any INSERT
					 * actions must be routed through it, not the child
					 * relations.  Initialize the routing struct and the root
					 * table's "new" tuple slot for that, if not already
					 * done.  The projection we prepare, for all relations,
					 * uses the root relation descriptor, and targets the
					 * plan's root slot.  (This is consistent with the fact
					 * that we checked the plan output to match the root
					 * relation, above.)
					 */
					if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
						RELKIND_PARTITIONED_TABLE)
					{
						if (mtstate->mt_partition_tuple_routing == NULL)
						{
							/*
							 * Initialize planstate for routing if not
							 * already done.
							 *
							 * Note that the slot is managed as a standalone
							 * slot belonging to ModifyTableState, so we pass
							 * NULL for the 2nd argument.
							 */
							mtstate->mt_root_tuple_slot =
								table_slot_create(rootRelInfo->ri_RelationDesc,
												  NULL);
							mtstate->mt_partition_tuple_routing =
								ExecSetupPartitionTupleRouting(estate,
															   rootRelInfo->ri_RelationDesc);
						}
						tgtslot = mtstate->mt_root_tuple_slot;
						tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
					}
					else
					{
						/* not partitioned? use the stock relation and slot */
						tgtslot = resultRelInfo->ri_newTupleSlot;
						tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
					}

					action_state->mas_proj =
						ExecBuildProjectionInfo(action->targetList, econtext,
												tgtslot,
												&mtstate->ps,
												tgtdesc);

					mtstate->mt_merge_subcommands |= MERGE_INSERT;
					break;
				case CMD_UPDATE:
					action_state->mas_proj =
						ExecBuildUpdateProjection(action->targetList,
												  true,
												  action->updateColnos,
												  relationDesc,
												  econtext,
												  resultRelInfo->ri_newTupleSlot,
												  &mtstate->ps);
					mtstate->mt_merge_subcommands |= MERGE_UPDATE;
					break;
				case CMD_DELETE:
					/* DELETE needs no projection, just record its presence */
					mtstate->mt_merge_subcommands |= MERGE_DELETE;
					break;
				case CMD_NOTHING:
					break;
				default:
					elog(ERROR, "unknown operation");
					break;
			}
		}
	}
}
3333 :
3334 : /*
3335 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3336 : *
3337 : * We mark 'projectNewInfoValid' even though the projections themselves
3338 : * are not initialized here.
3339 : */
3340 : void
377 alvherre 3341 CBC 517 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
377 alvherre 3342 ECB : ResultRelInfo *resultRelInfo)
3343 : {
377 alvherre 3344 CBC 517 : EState *estate = mtstate->ps.state;
377 alvherre 3345 EUB :
377 alvherre 3346 GBC 517 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3347 :
377 alvherre 3348 GIC 517 : resultRelInfo->ri_oldTupleSlot =
3349 517 : table_slot_create(resultRelInfo->ri_RelationDesc,
3350 : &estate->es_tupleTable);
3351 517 : resultRelInfo->ri_newTupleSlot =
3352 517 : table_slot_create(resultRelInfo->ri_RelationDesc,
3353 : &estate->es_tupleTable);
3354 517 : resultRelInfo->ri_projectNewInfoValid = true;
3355 517 : }
3356 :
3357 : /*
3358 : * Process BEFORE EACH STATEMENT triggers
3359 : */
4929 tgl 3360 ECB : static void
4929 tgl 3361 GIC 65123 : fireBSTriggers(ModifyTableState *node)
3362 : {
1847 alvherre 3363 CBC 65123 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
902 heikki.linnakangas 3364 GIC 65123 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
2169 rhaas 3365 ECB :
4929 tgl 3366 GIC 65123 : switch (node->operation)
4929 tgl 3367 ECB : {
4929 tgl 3368 CBC 51426 : case CMD_INSERT:
2169 rhaas 3369 GIC 51426 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
1847 alvherre 3370 CBC 51420 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
2893 andres 3371 414 : ExecBSUpdateTriggers(node->ps.state,
3372 : resultRelInfo);
4929 tgl 3373 51420 : break;
3374 7414 : case CMD_UPDATE:
2169 rhaas 3375 GIC 7414 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4929 tgl 3376 7414 : break;
3377 5879 : case CMD_DELETE:
2169 rhaas 3378 5879 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4929 tgl 3379 5879 : break;
377 alvherre 3380 CBC 404 : case CMD_MERGE:
377 alvherre 3381 GIC 404 : if (node->mt_merge_subcommands & MERGE_INSERT)
377 alvherre 3382 CBC 238 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3383 404 : if (node->mt_merge_subcommands & MERGE_UPDATE)
377 alvherre 3384 GIC 264 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
377 alvherre 3385 CBC 404 : if (node->mt_merge_subcommands & MERGE_DELETE)
377 alvherre 3386 GIC 88 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
377 alvherre 3387 CBC 404 : break;
4929 tgl 3388 LBC 0 : default:
3389 0 : elog(ERROR, "unknown operation");
4929 tgl 3390 ECB : break;
3391 : }
4929 tgl 3392 CBC 65117 : }
4929 tgl 3393 ECB :
2111 rhodiumtoad 3394 : /*
3395 : * Process AFTER EACH STATEMENT triggers
3396 : */
3397 : static void
2111 rhodiumtoad 3398 CBC 63735 : fireASTriggers(ModifyTableState *node)
2111 rhodiumtoad 3399 ECB : {
1847 alvherre 3400 CBC 63735 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
902 heikki.linnakangas 3401 63735 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
2169 rhaas 3402 ECB :
4929 tgl 3403 CBC 63735 : switch (node->operation)
4929 tgl 3404 ECB : {
4929 tgl 3405 CBC 50422 : case CMD_INSERT:
1847 alvherre 3406 50422 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
2893 andres 3407 GBC 363 : ExecASUpdateTriggers(node->ps.state,
2111 rhodiumtoad 3408 EUB : resultRelInfo,
2031 tgl 3409 GIC 363 : node->mt_oc_transition_capture);
2111 rhodiumtoad 3410 50422 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
2111 rhodiumtoad 3411 CBC 50422 : node->mt_transition_capture);
4929 tgl 3412 GIC 50422 : break;
3413 7131 : case CMD_UPDATE:
2111 rhodiumtoad 3414 7131 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3415 7131 : node->mt_transition_capture);
4929 tgl 3416 7131 : break;
4929 tgl 3417 CBC 5818 : case CMD_DELETE:
2111 rhodiumtoad 3418 GIC 5818 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
2111 rhodiumtoad 3419 CBC 5818 : node->mt_transition_capture);
4929 tgl 3420 5818 : break;
377 alvherre 3421 GIC 364 : case CMD_MERGE:
377 alvherre 3422 CBC 364 : if (node->mt_merge_subcommands & MERGE_DELETE)
377 alvherre 3423 GIC 73 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
377 alvherre 3424 CBC 73 : node->mt_transition_capture);
3425 364 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3426 239 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
377 alvherre 3427 GIC 239 : node->mt_transition_capture);
377 alvherre 3428 CBC 364 : if (node->mt_merge_subcommands & MERGE_INSERT)
3429 222 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3430 222 : node->mt_transition_capture);
3431 364 : break;
4929 tgl 3432 LBC 0 : default:
3433 0 : elog(ERROR, "unknown operation");
4929 tgl 3434 ECB : break;
3435 : }
4929 tgl 3436 CBC 63735 : }
4929 tgl 3437 ECB :
2111 rhodiumtoad 3438 : /*
3439 : * Set up the state needed for collecting transition tuples for AFTER
3440 : * triggers.
3441 : */
3442 : static void
2111 rhodiumtoad 3443 CBC 65269 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
2111 rhodiumtoad 3444 ECB : {
1847 alvherre 3445 CBC 65269 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
902 heikki.linnakangas 3446 65269 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
2111 rhodiumtoad 3447 ECB :
3448 : /* Check for transition tables on the directly targeted relation. */
2111 rhodiumtoad 3449 CBC 65269 : mtstate->mt_transition_capture =
2031 tgl 3450 65269 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
2031 tgl 3451 GBC 65269 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
2031 tgl 3452 EUB : mtstate->operation);
1847 alvherre 3453 GIC 65269 : if (plan->operation == CMD_INSERT &&
3454 51427 : plan->onConflictAction == ONCONFLICT_UPDATE)
2031 tgl 3455 CBC 414 : mtstate->mt_oc_transition_capture =
2031 tgl 3456 GIC 414 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3457 414 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3458 : CMD_UPDATE);
1906 rhaas 3459 65269 : }
3460 :
3461 : /*
1847 alvherre 3462 ECB : * ExecPrepareTupleRouting --- prepare for routing one tuple
3463 : *
3464 : * Determine the partition in which the tuple in slot is to be inserted,
907 heikki.linnakangas 3465 : * and return its ResultRelInfo in *partRelInfo. The return value is
3466 : * a slot holding the tuple of the partition rowtype.
3467 : *
3468 : * This also sets the transition table information in mtstate based on the
3469 : * selected partition.
1847 alvherre 3470 : */
3471 : static TupleTableSlot *
1847 alvherre 3472 CBC 360403 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
1847 alvherre 3473 ECB : EState *estate,
3474 : PartitionTupleRouting *proute,
3475 : ResultRelInfo *targetRelInfo,
907 heikki.linnakangas 3476 : TupleTableSlot *slot,
3477 : ResultRelInfo **partRelInfo)
1847 alvherre 3478 : {
3479 : ResultRelInfo *partrel;
3480 : TupleConversionMap *map;
3481 :
3482 : /*
3483 : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
3484 : * not find a valid partition for the tuple in 'slot' then an error is
3485 : * raised. An error may also be raised if the found partition is not a
3486 : * valid target for INSERTs. This is required since a partitioned table
3487 : * UPDATE to another partition becomes a DELETE+INSERT.
3488 : */
1605 alvherre 3489 GIC 360403 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3490 :
1847 alvherre 3491 ECB : /*
3492 : * If we're capturing transition tuples, we might need to convert from the
3493 : * partition rowtype to root partitioned table's rowtype. But if there
3494 : * are no BEFORE triggers on the partition that could change the tuple, we
3495 : * can just remember the original unconverted tuple to avoid a needless
3496 : * round trip conversion.
3497 : */
1847 alvherre 3498 GIC 360304 : if (mtstate->mt_transition_capture != NULL)
3499 : {
3500 : bool has_before_insert_row_trig;
3501 :
902 heikki.linnakangas 3502 84 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3503 21 : partrel->ri_TrigDesc->trig_insert_before_row);
3504 :
3505 63 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
3506 63 : !has_before_insert_row_trig ? slot : NULL;
3507 : }
1847 alvherre 3508 ECB :
3509 : /*
3510 : * Convert the tuple, if necessary.
3511 : */
128 alvherre 3512 GNC 360304 : map = ExecGetRootToChildMap(partrel, estate);
1650 andres 3513 GIC 360304 : if (map != NULL)
3514 : {
902 heikki.linnakangas 3515 34155 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3516 :
1650 andres 3517 CBC 34155 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3518 : }
3519 :
907 heikki.linnakangas 3520 GIC 360304 : *partRelInfo = partrel;
1847 alvherre 3521 CBC 360304 : return slot;
1847 alvherre 3522 ECB : }
3523 :
/* ----------------------------------------------------------------
 *	   ExecModifyTable
 *
 *		Perform table modifications as required, and return RETURNING results
 *		if needed.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecModifyTable(PlanState *pstate)
{
	ModifyTableState *node = castNode(ModifyTableState, pstate);
	ModifyTableContext context;
	EState	   *estate = node->ps.state;
	CmdType		operation = node->operation;
	ResultRelInfo *resultRelInfo;
	PlanState  *subplanstate;
	TupleTableSlot *slot;
	TupleTableSlot *oldSlot;
	ItemPointerData tuple_ctid;
	HeapTupleData oldtupdata;
	HeapTuple	oldtuple;
	ItemPointer tupleid;

	CHECK_FOR_INTERRUPTS();

	/*
	 * This should NOT get called during EvalPlanQual; we should have passed a
	 * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
	 * Assert because this condition is easy to miss in testing.  (Note:
	 * although ModifyTable should not get executed within an EvalPlanQual
	 * operation, we do have to allow it to be initialized and shut down in
	 * case it is within a CTE subplan.  Hence this test must be here, not in
	 * ExecInitModifyTable.)
	 */
	if (estate->es_epq_active != NULL)
		elog(ERROR, "ModifyTable should not be called during EvalPlanQual");

	/*
	 * If we've already completed processing, don't try to do more.  We need
	 * this test because ExecPostprocessPlan might call us an extra time, and
	 * our subplan's nodes aren't necessarily robust against being called
	 * extra times.
	 */
	if (node->mt_done)
		return NULL;

	/*
	 * On first call, fire BEFORE STATEMENT triggers before proceeding.
	 */
	if (node->fireBSTriggers)
	{
		fireBSTriggers(node);
		node->fireBSTriggers = false;
	}

	/* Preload local variables */
	resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
	subplanstate = outerPlanState(node);

	/* Set global context */
	context.mtstate = node;
	context.epqstate = &node->mt_epqstate;
	context.estate = estate;

	/*
	 * Fetch rows from subplan, and execute the required table modification
	 * for each row.
	 */
	for (;;)
	{
		/*
		 * Reset the per-output-tuple exprcontext.  This is needed because
		 * triggers expect to use that context as workspace.  It's a bit ugly
		 * to do this below the top level of the plan, however.  We might need
		 * to rethink this later.
		 */
		ResetPerTupleExprContext(estate);

		/*
		 * Reset per-tuple memory context used for processing on conflict and
		 * returning clauses, to free any expression evaluation storage
		 * allocated in the previous cycle.
		 */
		if (pstate->ps_ExprContext)
			ResetExprContext(pstate->ps_ExprContext);

		context.planSlot = ExecProcNode(subplanstate);

		/* No more tuples to process? */
		if (TupIsNull(context.planSlot))
			break;

		/*
		 * When there are multiple result relations, each tuple contains a
		 * junk column that gives the OID of the rel from which it came.
		 * Extract it and select the correct result relation.
		 */
		if (AttributeNumberIsValid(node->mt_resultOidAttno))
		{
			Datum		datum;
			bool		isNull;
			Oid			resultoid;

			datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
										 &isNull);
			if (isNull)
			{
				/*
				 * For commands other than MERGE, any tuples having InvalidOid
				 * for tableoid are errors.  For MERGE, we may need to handle
				 * them as WHEN NOT MATCHED clauses if any, so do that.
				 *
				 * Note that we use the node's toplevel resultRelInfo, not any
				 * specific partition's.
				 */
				if (operation == CMD_MERGE)
				{
					EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);

					ExecMerge(&context, node->resultRelInfo, NULL, node->canSetTag);
					continue;	/* no RETURNING support yet */
				}

				elog(ERROR, "tableoid is NULL");
			}
			resultoid = DatumGetObjectId(datum);

			/* If it's not the same as last time, we need to locate the rel */
			if (resultoid != node->mt_lastResultOid)
				resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
														 false, true);
		}

		/*
		 * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
		 * here is compute the RETURNING expressions.
		 */
		if (resultRelInfo->ri_usesFdwDirectModify)
		{
			Assert(resultRelInfo->ri_projectReturning);

			/*
			 * A scan slot containing the data that was actually inserted,
			 * updated or deleted has already been made available to
			 * ExecProcessReturning by IterateDirectModify, so no need to
			 * provide it here.
			 */
			slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot);

			return slot;
		}

		EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
		slot = context.planSlot;

		/* Row-identity info is filled in below only when applicable */
		tupleid = NULL;
		oldtuple = NULL;

		/*
		 * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
		 * to be updated/deleted/merged.  For a heap relation, that's a TID;
		 * otherwise we may have a wholerow junk attr that carries the old
		 * tuple in toto.  Keep this in step with the part of
		 * ExecInitModifyTable that sets up ri_RowIdAttNo.
		 */
		if (operation == CMD_UPDATE || operation == CMD_DELETE ||
			operation == CMD_MERGE)
		{
			char		relkind;
			Datum		datum;
			bool		isNull;

			relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
			if (relkind == RELKIND_RELATION ||
				relkind == RELKIND_MATVIEW ||
				relkind == RELKIND_PARTITIONED_TABLE)
			{
				/* ri_RowIdAttNo refers to a ctid attribute */
				Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
				datum = ExecGetJunkAttribute(slot,
											 resultRelInfo->ri_RowIdAttNo,
											 &isNull);

				/*
				 * For commands other than MERGE, any tuples having a null row
				 * identifier are errors.  For MERGE, we may need to handle
				 * them as WHEN NOT MATCHED clauses if any, so do that.
				 *
				 * Note that we use the node's toplevel resultRelInfo, not any
				 * specific partition's.
				 */
				if (isNull)
				{
					if (operation == CMD_MERGE)
					{
						EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);

						ExecMerge(&context, node->resultRelInfo, NULL, node->canSetTag);
						continue;	/* no RETURNING support yet */
					}

					elog(ERROR, "ctid is NULL");
				}

				tupleid = (ItemPointer) DatumGetPointer(datum);
				tuple_ctid = *tupleid;	/* be sure we don't free ctid!! */
				tupleid = &tuple_ctid;
			}

			/*
			 * Use the wholerow attribute, when available, to reconstruct the
			 * old relation tuple.  The old tuple serves one or both of two
			 * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
			 * provides values for any unchanged columns for the NEW tuple of
			 * an UPDATE, because the subplan does not produce all the columns
			 * of the target table.
			 *
			 * Note that the wholerow attribute does not carry system columns,
			 * so foreign table triggers miss seeing those, except that we
			 * know enough here to set t_tableOid.  Quite separately from
			 * this, the FDW may fetch its own junk attrs to identify the row.
			 *
			 * Other relevant relkinds, currently limited to views, always
			 * have a wholerow attribute.
			 */
			else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
			{
				datum = ExecGetJunkAttribute(slot,
											 resultRelInfo->ri_RowIdAttNo,
											 &isNull);
				/* shouldn't ever get a null result... */
				if (isNull)
					elog(ERROR, "wholerow is NULL");

				oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
				oldtupdata.t_len =
					HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
				ItemPointerSetInvalid(&(oldtupdata.t_self));
				/* Historically, view triggers see invalid t_tableOid. */
				oldtupdata.t_tableOid =
					(relkind == RELKIND_VIEW) ? InvalidOid :
					RelationGetRelid(resultRelInfo->ri_RelationDesc);

				oldtuple = &oldtupdata;
			}
			else
			{
				/* Only foreign tables are allowed to omit a row-ID attr */
				Assert(relkind == RELKIND_FOREIGN_TABLE);
			}
		}

		/* Dispatch to the per-operation handler */
		switch (operation)
		{
			case CMD_INSERT:
				/* Initialize projection info if first time for this table */
				if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
					ExecInitInsertProjection(node, resultRelInfo);
				slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
				slot = ExecInsert(&context, resultRelInfo, slot,
								  node->canSetTag, NULL, NULL);
				break;

			case CMD_UPDATE:
				/* Initialize projection info if first time for this table */
				if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
					ExecInitUpdateProjection(node, resultRelInfo);

				/*
				 * Make the new tuple by combining plan's output tuple with
				 * the old tuple being updated.
				 */
				oldSlot = resultRelInfo->ri_oldTupleSlot;
				if (oldtuple != NULL)
				{
					/* Use the wholerow junk attr as the old tuple. */
					ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
				}
				else
				{
					/* Fetch the most recent version of old tuple. */
					Relation	relation = resultRelInfo->ri_RelationDesc;

					if (!table_tuple_fetch_row_version(relation, tupleid,
													   SnapshotAny,
													   oldSlot))
						elog(ERROR, "failed to fetch tuple being updated");
				}
				slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
											 oldSlot);
				context.relaction = NULL;

				/* Now apply the update. */
				slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
								  slot, node->canSetTag);
				break;

			case CMD_DELETE:
				slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
								  true, false, node->canSetTag, NULL, NULL);
				break;

			case CMD_MERGE:
				slot = ExecMerge(&context, resultRelInfo, tupleid, node->canSetTag);
				break;

			default:
				elog(ERROR, "unknown operation");
				break;
		}

		/*
		 * If we got a RETURNING result, return it to caller.  We'll continue
		 * the work on next call.
		 */
		if (slot)
			return slot;
	}

	/*
	 * Insert remaining tuples for batch insert.
	 */
	if (estate->es_insert_pending_result_relations != NIL)
		ExecPendingInserts(estate);

	/*
	 * We're done, but fire AFTER STATEMENT triggers before exiting.
	 */
	fireASTriggers(node);

	node->mt_done = true;

	return NULL;
}
4929 tgl 3858 ECB :
733 3859 : /*
3860 : * ExecLookupResultRelByOid
3861 : * If the table with given OID is among the result relations to be
3862 : * updated by the given ModifyTable node, return its ResultRelInfo.
3863 : *
3864 : * If not found, return NULL if missing_ok, else raise error.
3865 : *
3866 : * If update_cache is true, then upon successful lookup, update the node's
3867 : * one-element cache. ONLY ExecModifyTable may pass true for this.
3868 : */
3869 : ResultRelInfo *
733 tgl 3870 GIC 5239 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
733 tgl 3871 ECB : bool missing_ok, bool update_cache)
3872 : {
733 tgl 3873 CBC 5239 : if (node->mt_resultOidHash)
3874 : {
733 tgl 3875 ECB : /* Use the pre-built hash table to locate the rel */
3876 : MTTargetRelLookup *mtlookup;
3877 :
3878 : mtlookup = (MTTargetRelLookup *)
733 tgl 3879 GIC 427 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
3880 427 : if (mtlookup)
3881 : {
3882 427 : if (update_cache)
3883 : {
3884 325 : node->mt_lastResultOid = resultoid;
3885 325 : node->mt_lastResultIndex = mtlookup->relationIndex;
3886 : }
3887 427 : return node->resultRelInfo + mtlookup->relationIndex;
3888 : }
733 tgl 3889 ECB : }
3890 : else
3891 : {
3892 : /* With few target rels, just search the ResultRelInfo array */
733 tgl 3893 GIC 8894 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
3894 : {
3895 5163 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
3896 :
3897 5163 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
733 tgl 3898 ECB : {
733 tgl 3899 CBC 1081 : if (update_cache)
3900 : {
3901 990 : node->mt_lastResultOid = resultoid;
733 tgl 3902 GIC 990 : node->mt_lastResultIndex = ndx;
733 tgl 3903 ECB : }
733 tgl 3904 CBC 1081 : return rInfo;
3905 : }
733 tgl 3906 ECB : }
3907 : }
3908 :
733 tgl 3909 GIC 3731 : if (!missing_ok)
733 tgl 3910 UIC 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
733 tgl 3911 GIC 3731 : return NULL;
733 tgl 3912 ECB : }
3913 :
/* ----------------------------------------------------------------
 *		ExecInitModifyTable
 *
 *		Initialize the ModifyTable node: open and validate all result
 *		relations, initialize the subplan, set up per-relation junk-attr
 *		and projection state, and prepare RETURNING, WITH CHECK OPTION,
 *		ON CONFLICT, MERGE, and EvalPlanQual machinery as needed.
 *
 *		'node' is the ModifyTable plan node; 'estate' the query's EState;
 *		'eflags' the usual executor flag bits.  Returns the new
 *		ModifyTableState.  Order of the steps below matters: result
 *		relations must be opened before the subplan is initialized,
 *		because direct-modify FDWs expect their ResultRelInfos then.
 * ----------------------------------------------------------------
 */
ModifyTableState *
ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
{
	ModifyTableState *mtstate;
	Plan	   *subplan = outerPlan(node);
	CmdType		operation = node->operation;
	int			nrels = list_length(node->resultRelations);
	ResultRelInfo *resultRelInfo;
	List	   *arowmarks;
	ListCell   *l;
	int			i;
	Relation	rel;

	/* check for unsupported flags */
	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

	/*
	 * create state structure
	 */
	mtstate = makeNode(ModifyTableState);
	mtstate->ps.plan = (Plan *) node;
	mtstate->ps.state = estate;
	mtstate->ps.ExecProcNode = ExecModifyTable;

	mtstate->operation = operation;
	mtstate->canSetTag = node->canSetTag;
	mtstate->mt_done = false;

	mtstate->mt_nrels = nrels;
	mtstate->resultRelInfo = (ResultRelInfo *)
		palloc(nrels * sizeof(ResultRelInfo));

	/* MERGE instrumentation counters start at zero */
	mtstate->mt_merge_inserted = 0;
	mtstate->mt_merge_updated = 0;
	mtstate->mt_merge_deleted = 0;

	/*----------
	 * Resolve the target relation. This is the same as:
	 *
	 * - the relation for which we will fire FOR STATEMENT triggers,
	 * - the relation into whose tuple format all captured transition tuples
	 *   must be converted, and
	 * - the root partitioned table used for tuple routing.
	 *
	 * If it's a partitioned table, the root partition doesn't appear
	 * elsewhere in the plan and its RT index is given explicitly in
	 * node->rootRelation.  Otherwise (i.e. table inheritance) the target
	 * relation is the first relation in the node->resultRelations list.
	 *----------
	 */
	if (node->rootRelation > 0)
	{
		mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
		ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
							   node->rootRelation);
	}
	else
	{
		mtstate->rootResultRelInfo = mtstate->resultRelInfo;
		ExecInitResultRelation(estate, mtstate->resultRelInfo,
							   linitial_int(node->resultRelations));
	}

	/* set up epqstate with dummy subplan data for the moment */
	EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
	mtstate->fireBSTriggers = true;

	/*
	 * Build state for collecting transition tuples.  This requires having a
	 * valid trigger query context, so skip it in explain-only mode.
	 */
	if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
		ExecSetupTransitionCaptureState(mtstate, estate);

	/*
	 * Open all the result relations and initialize the ResultRelInfo structs.
	 * (But root relation was initialized above, if it's part of the array.)
	 * We must do this before initializing the subplan, because direct-modify
	 * FDWs expect their ResultRelInfos to be available.
	 */
	resultRelInfo = mtstate->resultRelInfo;
	i = 0;
	foreach(l, node->resultRelations)
	{
		Index		resultRelation = lfirst_int(l);

		if (resultRelInfo != mtstate->rootResultRelInfo)
		{
			ExecInitResultRelation(estate, resultRelInfo, resultRelation);

			/*
			 * For child result relations, store the root result relation
			 * pointer.  We do so for the convenience of places that want to
			 * look at the query's original target relation but don't have the
			 * mtstate handy.
			 */
			resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
		}

		/* Initialize the usesFdwDirectModify flag */
		resultRelInfo->ri_usesFdwDirectModify =
			bms_is_member(i, node->fdwDirectModifyPlans);

		/*
		 * Verify result relation is a valid target for the current operation
		 */
		CheckValidResultRel(resultRelInfo, operation);

		resultRelInfo++;
		i++;
	}

	/*
	 * Now we may initialize the subplan.
	 */
	outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);

	/*
	 * Do additional per-result-relation initialization.
	 */
	for (i = 0; i < nrels; i++)
	{
		resultRelInfo = &mtstate->resultRelInfo[i];

		/* Let FDWs init themselves for foreign-table result rels */
		if (!resultRelInfo->ri_usesFdwDirectModify &&
			resultRelInfo->ri_FdwRoutine != NULL &&
			resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
		{
			List	   *fdw_private = (List *) list_nth(node->fdwPrivLists, i);

			resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
															 resultRelInfo,
															 fdw_private,
															 i,
															 eflags);
		}

		/*
		 * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
		 * a 'ctid' or 'wholerow' attribute depending on relkind.  For foreign
		 * tables, the FDW might have created additional junk attr(s), but
		 * those are no concern of ours.
		 */
		if (operation == CMD_UPDATE || operation == CMD_DELETE ||
			operation == CMD_MERGE)
		{
			char		relkind;

			relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
			if (relkind == RELKIND_RELATION ||
				relkind == RELKIND_MATVIEW ||
				relkind == RELKIND_PARTITIONED_TABLE)
			{
				/* Ordinary tables locate the target row by ctid */
				resultRelInfo->ri_RowIdAttNo =
					ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
				if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
					elog(ERROR, "could not find junk ctid column");
			}
			else if (relkind == RELKIND_FOREIGN_TABLE)
			{
				/*
				 * We don't support MERGE with foreign tables for now.  (It's
				 * problematic because the implementation uses CTID.)
				 */
				Assert(operation != CMD_MERGE);

				/*
				 * When there is a row-level trigger, there should be a
				 * wholerow attribute.  We also require it to be present in
				 * UPDATE and MERGE, so we can get the values of unchanged
				 * columns.
				 */
				resultRelInfo->ri_RowIdAttNo =
					ExecFindJunkAttributeInTlist(subplan->targetlist,
												 "wholerow");
				if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
					!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
					elog(ERROR, "could not find junk wholerow column");
			}
			else
			{
				/* No support for MERGE */
				Assert(operation != CMD_MERGE);
				/* Other valid target relkinds must provide wholerow */
				resultRelInfo->ri_RowIdAttNo =
					ExecFindJunkAttributeInTlist(subplan->targetlist,
												 "wholerow");
				if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
					elog(ERROR, "could not find junk wholerow column");
			}
		}
	}

	/*
	 * If this is an inherited update/delete/merge, there will be a junk
	 * attribute named "tableoid" present in the subplan's targetlist.  It
	 * will be used to identify the result relation for a given tuple to be
	 * updated/deleted/merged.
	 */
	mtstate->mt_resultOidAttno =
		ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
	Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
	mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
	mtstate->mt_lastResultIndex = 0;	/* must be zero if no such attr */

	/* Get the root target relation */
	rel = mtstate->rootResultRelInfo->ri_RelationDesc;

	/*
	 * Build state for tuple routing if it's a partitioned INSERT.  An UPDATE
	 * or MERGE might need this too, but only if it actually moves tuples
	 * between partitions; in that case setup is done by
	 * ExecCrossPartitionUpdate.
	 */
	if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
		operation == CMD_INSERT)
		mtstate->mt_partition_tuple_routing =
			ExecSetupPartitionTupleRouting(estate, rel);

	/*
	 * Initialize any WITH CHECK OPTION constraints if needed.  Each result
	 * rel gets its own list of qual expressions, initialized in step with
	 * node->withCheckOptionLists.
	 */
	resultRelInfo = mtstate->resultRelInfo;
	foreach(l, node->withCheckOptionLists)
	{
		List	   *wcoList = (List *) lfirst(l);
		List	   *wcoExprs = NIL;
		ListCell   *ll;

		foreach(ll, wcoList)
		{
			WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
			ExprState  *wcoExpr = ExecInitQual((List *) wco->qual,
											   &mtstate->ps);

			wcoExprs = lappend(wcoExprs, wcoExpr);
		}

		resultRelInfo->ri_WithCheckOptions = wcoList;
		resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
		resultRelInfo++;
	}

	/*
	 * Initialize RETURNING projections if needed.
	 */
	if (node->returningLists)
	{
		TupleTableSlot *slot;
		ExprContext *econtext;

		/*
		 * Initialize result tuple slot and assign its rowtype using the first
		 * RETURNING list.  We assume the rest will look the same.
		 */
		mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);

		/* Set up a slot for the output of the RETURNING projection(s) */
		ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
		slot = mtstate->ps.ps_ResultTupleSlot;

		/* Need an econtext too */
		if (mtstate->ps.ps_ExprContext == NULL)
			ExecAssignExprContext(estate, &mtstate->ps);
		econtext = mtstate->ps.ps_ExprContext;

		/*
		 * Build a projection for each result rel.
		 */
		resultRelInfo = mtstate->resultRelInfo;
		foreach(l, node->returningLists)
		{
			List	   *rlist = (List *) lfirst(l);

			resultRelInfo->ri_returningList = rlist;
			resultRelInfo->ri_projectReturning =
				ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
										resultRelInfo->ri_RelationDesc->rd_att);
			resultRelInfo++;
		}
	}
	else
	{
		/*
		 * We still must construct a dummy result tuple type, because InitPlan
		 * expects one (maybe should change that?).
		 */
		mtstate->ps.plan->targetlist = NIL;
		ExecInitResultTypeTL(&mtstate->ps);

		mtstate->ps.ps_ExprContext = NULL;
	}

	/* Set the list of arbiter indexes if needed for ON CONFLICT */
	resultRelInfo = mtstate->resultRelInfo;
	if (node->onConflictAction != ONCONFLICT_NONE)
	{
		/* insert may only have one relation, inheritance is not expanded */
		Assert(nrels == 1);
		resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
	}

	/*
	 * If needed, initialize target list, projection and qual for ON CONFLICT
	 * DO UPDATE.
	 */
	if (node->onConflictAction == ONCONFLICT_UPDATE)
	{
		OnConflictSetState *onconfl = makeNode(OnConflictSetState);
		ExprContext *econtext;
		TupleDesc	relationDesc;

		/* already exists if created by RETURNING processing above */
		if (mtstate->ps.ps_ExprContext == NULL)
			ExecAssignExprContext(estate, &mtstate->ps);

		econtext = mtstate->ps.ps_ExprContext;
		relationDesc = resultRelInfo->ri_RelationDesc->rd_att;

		/* create state for DO UPDATE SET operation */
		resultRelInfo->ri_onConflict = onconfl;

		/* initialize slot for the existing tuple */
		onconfl->oc_Existing =
			table_slot_create(resultRelInfo->ri_RelationDesc,
							  &mtstate->ps.state->es_tupleTable);

		/*
		 * Create the tuple slot for the UPDATE SET projection. We want a slot
		 * of the table's type here, because the slot will be used to insert
		 * into the table, and for RETURNING processing - which may access
		 * system attributes.
		 */
		onconfl->oc_ProjSlot =
			table_slot_create(resultRelInfo->ri_RelationDesc,
							  &mtstate->ps.state->es_tupleTable);

		/* build UPDATE SET projection state */
		onconfl->oc_ProjInfo =
			ExecBuildUpdateProjection(node->onConflictSet,
									  true,
									  node->onConflictCols,
									  relationDesc,
									  econtext,
									  onconfl->oc_ProjSlot,
									  &mtstate->ps);

		/* initialize state to evaluate the WHERE clause, if any */
		if (node->onConflictWhere)
		{
			ExprState  *qualexpr;

			qualexpr = ExecInitQual((List *) node->onConflictWhere,
									&mtstate->ps);
			onconfl->oc_WhereClause = qualexpr;
		}
	}

	/*
	 * If we have any secondary relations in an UPDATE or DELETE, they need to
	 * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
	 * EvalPlanQual mechanism needs to be told about them.  Locate the
	 * relevant ExecRowMarks.
	 */
	arowmarks = NIL;
	foreach(l, node->rowMarks)
	{
		PlanRowMark *rc = lfirst_node(PlanRowMark, l);
		ExecRowMark *erm;
		ExecAuxRowMark *aerm;

		/* ignore "parent" rowmarks; they are irrelevant at runtime */
		if (rc->isParent)
			continue;

		/* Find ExecRowMark and build ExecAuxRowMark */
		erm = ExecFindRowMark(estate, rc->rti, false);
		aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
		arowmarks = lappend(arowmarks, aerm);
	}

	/* For a MERGE command, initialize its state */
	if (mtstate->operation == CMD_MERGE)
		ExecInitMerge(mtstate, estate);

	/* Now that the subplan is known, install it in the EPQ state */
	EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);

	/*
	 * If there are a lot of result relations, use a hash table to speed the
	 * lookups.  If there are not a lot, a simple linear search is faster.
	 *
	 * It's not clear where the threshold is, but try 64 for starters.  In a
	 * debugging build, use a small threshold so that we get some test
	 * coverage of both code paths.
	 */
#ifdef USE_ASSERT_CHECKING
#define MT_NRELS_HASH 4
#else
#define MT_NRELS_HASH 64
#endif
	if (nrels >= MT_NRELS_HASH)
	{
		HASHCTL		hash_ctl;

		hash_ctl.keysize = sizeof(Oid);
		hash_ctl.entrysize = sizeof(MTTargetRelLookup);
		hash_ctl.hcxt = CurrentMemoryContext;
		mtstate->mt_resultOidHash =
			hash_create("ModifyTable target hash",
						nrels, &hash_ctl,
						HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
		for (i = 0; i < nrels; i++)
		{
			Oid			hashkey;
			MTTargetRelLookup *mtlookup;
			bool		found;

			resultRelInfo = &mtstate->resultRelInfo[i];
			hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
			mtlookup = (MTTargetRelLookup *)
				hash_search(mtstate->mt_resultOidHash, &hashkey,
							HASH_ENTER, &found);
			Assert(!found);
			mtlookup->relationIndex = i;
		}
	}
	else
		mtstate->mt_resultOidHash = NULL;

	/*
	 * Determine if the FDW supports batch insert and determine the batch size
	 * (a FDW may support batching, but it may be disabled for the
	 * server/table).
	 *
	 * We only do this for INSERT, so that for UPDATE/DELETE the batch size
	 * remains set to 0.
	 */
	if (operation == CMD_INSERT)
	{
		/* insert may only have one relation, inheritance is not expanded */
		Assert(nrels == 1);
		resultRelInfo = mtstate->resultRelInfo;
		if (!resultRelInfo->ri_usesFdwDirectModify &&
			resultRelInfo->ri_FdwRoutine != NULL &&
			resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
			resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
		{
			resultRelInfo->ri_BatchSize =
				resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
			Assert(resultRelInfo->ri_BatchSize >= 1);
		}
		else
			resultRelInfo->ri_BatchSize = 1;
	}

	/*
	 * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
	 * to estate->es_auxmodifytables so that it will be run to completion by
	 * ExecPostprocessPlan.  (It'd actually work fine to add the primary
	 * ModifyTable node too, but there's no need.)  Note the use of lcons not
	 * lappend: we need later-initialized ModifyTable nodes to be shut down
	 * before earlier ones.  This ensures that we don't throw away RETURNING
	 * rows that need to be seen by a later CTE subplan.
	 */
	if (!mtstate->canSetTag)
		estate->es_auxmodifytables = lcons(mtstate,
										   estate->es_auxmodifytables);

	return mtstate;
}
4389 :
4929 tgl 4390 ECB : /* ----------------------------------------------------------------
4391 : * ExecEndModifyTable
4392 : *
4393 : * Shuts down the plan.
4394 : *
4395 : * Returns nothing of interest.
4396 : * ----------------------------------------------------------------
4397 : */
4398 : void
4929 tgl 4399 GIC 63676 : ExecEndModifyTable(ModifyTableState *node)
4400 : {
4401 : int i;
4402 :
4403 : /*
4404 : * Allow any FDWs to shut down
4405 : */
739 tgl 4406 CBC 128293 : for (i = 0; i < node->mt_nrels; i++)
4407 : {
4408 : int j;
3682 tgl 4409 GIC 64617 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
4410 :
2578 rhaas 4411 64617 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4412 64521 : resultRelInfo->ri_FdwRoutine != NULL &&
3682 tgl 4413 CBC 144 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
3682 tgl 4414 GIC 144 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4415 : resultRelInfo);
667 tomas.vondra 4416 ECB :
4417 : /*
650 andrew 4418 : * Cleanup the initialized batch slots. This only matters for FDWs
4419 : * with batching, but the other cases will have ri_NumSlotsInitialized
4420 : * == 0.
667 tomas.vondra 4421 : */
667 tomas.vondra 4422 GIC 130180 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4423 : {
4424 65563 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
4425 65563 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
4426 : }
4427 : }
4428 :
1605 alvherre 4429 ECB : /*
4430 : * Close all the partitioned tables, leaf partitions, and their indices
4431 : * and release the slot used for tuple routing, if set.
4432 : */
1921 rhaas 4433 GIC 63676 : if (node->mt_partition_tuple_routing)
4434 : {
1829 4435 2699 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
4436 :
1605 alvherre 4437 2699 : if (node->mt_root_tuple_slot)
4438 251 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
4439 : }
1605 alvherre 4440 ECB :
4441 : /*
4929 tgl 4442 : * Free the exprcontext
4443 : */
4929 tgl 4444 CBC 63676 : ExecFreeExprContext(&node->ps);
4929 tgl 4445 ECB :
4446 : /*
4447 : * clean out the tuple table
4448 : */
1612 andres 4449 GIC 63676 : if (node->ps.ps_ResultTupleSlot)
4450 1924 : ExecClearTuple(node->ps.ps_ResultTupleSlot);
4929 tgl 4451 ECB :
4452 : /*
4453 : * Terminate EPQ execution if active
4454 : */
4913 tgl 4455 GIC 63676 : EvalPlanQualEnd(&node->mt_epqstate);
4913 tgl 4456 ECB :
4929 4457 : /*
4458 : * shut down subplan
4459 : */
739 tgl 4460 GIC 63676 : ExecEndNode(outerPlanState(node));
4929 4461 63676 : }
4929 tgl 4462 ECB :
/*
 * ExecReScanModifyTable
 *		Rescan entry point; deliberately unimplemented for ModifyTable.
 */
void
ExecReScanModifyTable(ModifyTableState *node)
{
	/*
	 * Currently, we don't need to support rescan on ModifyTable nodes. The
	 * semantics of that would be a bit debatable anyway.
	 */
	elog(ERROR, "ExecReScanModifyTable is not implemented");
}
|