Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * nodeModifyTable.c
4 : : * routines to handle ModifyTable nodes.
5 : : *
6 : : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/executor/nodeModifyTable.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : : /* INTERFACE ROUTINES
16 : : * ExecInitModifyTable - initialize the ModifyTable node
17 : : * ExecModifyTable - retrieve the next tuple from the node
18 : : * ExecEndModifyTable - shut down the ModifyTable node
19 : : * ExecReScanModifyTable - rescan the ModifyTable node
20 : : *
21 : : * NOTES
22 : : * The ModifyTable node receives input from its outerPlan, which is
23 : : * the data to insert for INSERT cases, the changed columns' new
24 : : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : : * row-locating info for DELETE cases.
26 : : *
27 : : * MERGE runs a join between the source relation and the target table.
28 : : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
29 : : * is an outer join that might output tuples without a matching target
30 : : * tuple. In this case, any unmatched target tuples will have NULL
31 : : * row-locating info, and only INSERT can be run. But for matched target
32 : : * tuples, the row-locating info is used to determine the tuple to UPDATE
33 : : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
34 : : * SOURCE, all tuples produced by the join will include a matching target
35 : : * tuple, so all tuples contain row-locating info.
36 : : *
37 : : * If the query specifies RETURNING, then the ModifyTable returns a
38 : : * RETURNING tuple after completing each row insert, update, or delete.
39 : : * It must be called again to continue the operation. Without RETURNING,
40 : : * we just loop within the node until all the work is done, then
41 : : * return NULL. This avoids useless call/return overhead.
42 : : */
43 : :
44 : : #include "postgres.h"
45 : :
46 : : #include "access/htup_details.h"
47 : : #include "access/tableam.h"
48 : : #include "access/xact.h"
49 : : #include "commands/trigger.h"
50 : : #include "executor/execPartition.h"
51 : : #include "executor/executor.h"
52 : : #include "executor/nodeModifyTable.h"
53 : : #include "foreign/fdwapi.h"
54 : : #include "miscadmin.h"
55 : : #include "nodes/nodeFuncs.h"
56 : : #include "optimizer/optimizer.h"
57 : : #include "rewrite/rewriteHandler.h"
58 : : #include "storage/lmgr.h"
59 : : #include "utils/builtins.h"
60 : : #include "utils/datum.h"
61 : : #include "utils/rel.h"
62 : : #include "utils/snapmgr.h"
63 : :
64 : :
/*
 * Hash-table entry type: maps a target relation's OID (the key) to that
 * rel's position in the resultRelInfo[] array, so a result rel can be
 * looked up by OID without a linear scan.
 */
typedef struct MTTargetRelLookup
{
	Oid			relationOid;	/* hash key, must be first */
	int			relationIndex;	/* rel's index in resultRelInfo[] array */
} MTTargetRelLookup;
70 : :
/*
 * Context struct for a ModifyTable operation, containing basic execution
 * state and some output variables populated by ExecUpdateAct() and
 * ExecDeleteAct() to report the result of their actions to callers.
 */
typedef struct ModifyTableContext
{
	/* Operation state */
	ModifyTableState *mtstate;	/* overall ModifyTable executor state */
	EPQState   *epqstate;		/* state for EvalPlanQual rechecks */
	EState	   *estate;			/* top-level executor state */

	/*
	 * Slot containing tuple obtained from ModifyTable's subplan.  Used to
	 * access "junk" columns that are not going to be stored.
	 */
	TupleTableSlot *planSlot;

	/*
	 * Information about the changes that were made concurrently to a tuple
	 * being updated or deleted.
	 *
	 * NOTE(review): presumably only meaningful after a table-AM update or
	 * delete reports a concurrency failure — confirm against the callers of
	 * ExecUpdateAct()/ExecDeleteAct().
	 */
	TM_FailureData tmfd;

	/*
	 * The tuple projected by the INSERT's RETURNING clause, when doing a
	 * cross-partition UPDATE
	 */
	TupleTableSlot *cpUpdateReturningSlot;
} ModifyTableContext;
101 : :
/*
 * Context struct containing output data specific to UPDATE operations.
 * (Per the ModifyTableContext comment above it, these are populated by
 * ExecUpdateAct() for its callers to inspect.)
 */
typedef struct UpdateContext
{
	bool		crossPartUpdate;	/* was it a cross-partition update? */
	TU_UpdateIndexes updateIndexes; /* Which index updates are required? */

	/*
	 * Lock mode to acquire on the latest tuple version before performing
	 * EvalPlanQual on it
	 */
	LockTupleMode lockmode;
} UpdateContext;
116 : :
117 : :
/* Forward declarations of routines local to this file */
static void ExecBatchInsert(ModifyTableState *mtstate,
							ResultRelInfo *resultRelInfo,
							TupleTableSlot **slots,
							TupleTableSlot **planSlots,
							int numSlots,
							EState *estate,
							bool canSetTag);
static void ExecPendingInserts(EState *estate);
static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
											   ResultRelInfo *sourcePartInfo,
											   ResultRelInfo *destPartInfo,
											   ItemPointer tupleid,
											   TupleTableSlot *oldslot,
											   TupleTableSlot *newslot);
static bool ExecOnConflictUpdate(ModifyTableContext *context,
								 ResultRelInfo *resultRelInfo,
								 ItemPointer conflictTid,
								 TupleTableSlot *excludedSlot,
								 bool canSetTag,
								 TupleTableSlot **returning);
static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
											   EState *estate,
											   PartitionTupleRouting *proute,
											   ResultRelInfo *targetRelInfo,
											   TupleTableSlot *slot,
											   ResultRelInfo **partRelInfo);

/* MERGE support routines */
static TupleTableSlot *ExecMerge(ModifyTableContext *context,
								 ResultRelInfo *resultRelInfo,
								 ItemPointer tupleid,
								 HeapTuple oldtuple,
								 bool canSetTag);
static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
										ResultRelInfo *resultRelInfo,
										ItemPointer tupleid,
										HeapTuple oldtuple,
										bool canSetTag,
										bool *matched);
static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
										   ResultRelInfo *resultRelInfo,
										   bool canSetTag);
160 : :
161 : :
162 : : /*
163 : : * Verify that the tuples to be produced by INSERT match the
164 : : * target relation's rowtype
165 : : *
166 : : * We do this to guard against stale plans. If plan invalidation is
167 : : * functioning properly then we should never get a failure here, but better
168 : : * safe than sorry. Note that this is called after we have obtained lock
169 : : * on the target rel, so the rowtype can't change underneath us.
170 : : *
171 : : * The plan output is represented by its targetlist, because that makes
172 : : * handling the dropped-column case easier.
173 : : *
174 : : * We used to use this for UPDATE as well, but now the equivalent checks
175 : : * are done in ExecBuildUpdateProjection.
176 : : */
177 : : static void
5300 tgl@sss.pgh.pa.us 178 :CBC 43918 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
179 : : {
180 : 43918 : TupleDesc resultDesc = RelationGetDescr(resultRel);
181 : 43918 : int attno = 0;
182 : : ListCell *lc;
183 : :
184 [ + + + + : 134875 : foreach(lc, targetList)
+ + ]
185 : : {
186 : 90957 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
187 : : Form_pg_attribute attr;
188 : :
1110 189 [ - + ]: 90957 : Assert(!tle->resjunk); /* caller removed junk items already */
190 : :
5300 191 [ - + ]: 90957 : if (attno >= resultDesc->natts)
5300 tgl@sss.pgh.pa.us 192 [ # # ]:UBC 0 : ereport(ERROR,
193 : : (errcode(ERRCODE_DATATYPE_MISMATCH),
194 : : errmsg("table row type and query-specified row type do not match"),
195 : : errdetail("Query has too many columns.")));
2429 andres@anarazel.de 196 :CBC 90957 : attr = TupleDescAttr(resultDesc, attno);
197 : 90957 : attno++;
198 : :
5300 tgl@sss.pgh.pa.us 199 [ + + ]: 90957 : if (!attr->attisdropped)
200 : : {
201 : : /* Normal case: demand type match */
202 [ - + ]: 90652 : if (exprType((Node *) tle->expr) != attr->atttypid)
5300 tgl@sss.pgh.pa.us 203 [ # # ]:UBC 0 : ereport(ERROR,
204 : : (errcode(ERRCODE_DATATYPE_MISMATCH),
205 : : errmsg("table row type and query-specified row type do not match"),
206 : : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
207 : : format_type_be(attr->atttypid),
208 : : attno,
209 : : format_type_be(exprType((Node *) tle->expr)))));
210 : : }
211 : : else
212 : : {
213 : : /*
214 : : * For a dropped column, we can't check atttypid (it's likely 0).
215 : : * In any case the planner has most likely inserted an INT4 null.
216 : : * What we insist on is just *some* NULL constant.
217 : : */
5300 tgl@sss.pgh.pa.us 218 [ + - ]:CBC 305 : if (!IsA(tle->expr, Const) ||
219 [ - + ]: 305 : !((Const *) tle->expr)->constisnull)
5300 tgl@sss.pgh.pa.us 220 [ # # ]:UBC 0 : ereport(ERROR,
221 : : (errcode(ERRCODE_DATATYPE_MISMATCH),
222 : : errmsg("table row type and query-specified row type do not match"),
223 : : errdetail("Query provides a value for a dropped column at ordinal position %d.",
224 : : attno)));
225 : : }
226 : : }
5300 tgl@sss.pgh.pa.us 227 [ - + ]:CBC 43918 : if (attno != resultDesc->natts)
5300 tgl@sss.pgh.pa.us 228 [ # # ]:UBC 0 : ereport(ERROR,
229 : : (errcode(ERRCODE_DATATYPE_MISMATCH),
230 : : errmsg("table row type and query-specified row type do not match"),
231 : : errdetail("Query has too few columns.")));
5300 tgl@sss.pgh.pa.us 232 :CBC 43918 : }
233 : :
234 : : /*
235 : : * ExecProcessReturning --- evaluate a RETURNING list
236 : : *
237 : : * resultRelInfo: current result rel
238 : : * tupleSlot: slot holding tuple actually inserted/updated/deleted
239 : : * planSlot: slot holding tuple returned by top subplan node
240 : : *
241 : : * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
242 : : * scan tuple.
243 : : *
244 : : * Returns a slot holding the result tuple
245 : : */
246 : : static TupleTableSlot *
2949 rhaas@postgresql.org 247 : 3716 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
248 : : TupleTableSlot *tupleSlot,
249 : : TupleTableSlot *planSlot)
250 : : {
251 : 3716 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
5300 tgl@sss.pgh.pa.us 252 : 3716 : ExprContext *econtext = projectReturning->pi_exprContext;
253 : :
254 : : /* Make tuple and any needed join variables available to ExecProject */
2949 rhaas@postgresql.org 255 [ + + ]: 3716 : if (tupleSlot)
256 : 3369 : econtext->ecxt_scantuple = tupleSlot;
5300 tgl@sss.pgh.pa.us 257 : 3716 : econtext->ecxt_outertuple = planSlot;
258 : :
259 : : /*
260 : : * RETURNING expressions might reference the tableoid column, so
261 : : * reinitialize tts_tableOid before evaluating them.
262 : : */
1874 andres@anarazel.de 263 : 3716 : econtext->ecxt_scantuple->tts_tableOid =
264 : 3716 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
265 : :
266 : : /* Compute the RETURNING expressions */
2642 267 : 3716 : return ExecProject(projectReturning);
268 : : }
269 : :
270 : : /*
271 : : * ExecCheckTupleVisible -- verify tuple is visible
272 : : *
273 : : * It would not be consistent with guarantees of the higher isolation levels to
274 : : * proceed with avoiding insertion (taking speculative insertion's alternative
275 : : * path) on the basis of another tuple that is not visible to MVCC snapshot.
276 : : * Check for the need to raise a serialization failure, and do so as necessary.
277 : : */
278 : : static void
1849 279 : 2620 : ExecCheckTupleVisible(EState *estate,
280 : : Relation rel,
281 : : TupleTableSlot *slot)
282 : : {
3264 283 [ + + ]: 2620 : if (!IsolationUsesXactSnapshot())
284 : 2588 : return;
285 : :
1849 286 [ + + ]: 32 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
287 : : {
288 : : Datum xminDatum;
289 : : TransactionId xmin;
290 : : bool isnull;
291 : :
292 : 20 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
293 [ - + ]: 20 : Assert(!isnull);
294 : 20 : xmin = DatumGetTransactionId(xminDatum);
295 : :
296 : : /*
297 : : * We should not raise a serialization failure if the conflict is
298 : : * against a tuple inserted by our own transaction, even if it's not
299 : : * visible to our snapshot. (This would happen, for example, if
300 : : * conflicting keys are proposed for insertion in a single command.)
301 : : */
302 [ + + ]: 20 : if (!TransactionIdIsCurrentTransactionId(xmin))
2730 tgl@sss.pgh.pa.us 303 [ + - ]: 10 : ereport(ERROR,
304 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
305 : : errmsg("could not serialize access due to concurrent update")));
306 : : }
307 : : }
308 : :
309 : : /*
310 : : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
311 : : */
312 : : static void
3264 andres@anarazel.de 313 : 79 : ExecCheckTIDVisible(EState *estate,
314 : : ResultRelInfo *relinfo,
315 : : ItemPointer tid,
316 : : TupleTableSlot *tempSlot)
317 : : {
318 : 79 : Relation rel = relinfo->ri_RelationDesc;
319 : :
320 : : /* Redundantly check isolation level */
321 [ + + ]: 79 : if (!IsolationUsesXactSnapshot())
322 : 47 : return;
323 : :
1788 324 [ - + ]: 32 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
3264 andres@anarazel.de 325 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
1849 andres@anarazel.de 326 :CBC 32 : ExecCheckTupleVisible(estate, rel, tempSlot);
327 : 22 : ExecClearTuple(tempSlot);
328 : : }
329 : :
/*
 * Initialize to compute stored generated columns for a tuple
 *
 * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
 * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
 * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
 *
 * Note: usually, a given query would need only one of ri_GeneratedExprsI and
 * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
 * cross-partition UPDATEs, since a partition might be the target of both
 * UPDATE and INSERT actions.
 */
void
ExecInitStoredGenerated(ResultRelInfo *resultRelInfo,
						EState *estate,
						CmdType cmdtype)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = tupdesc->natts;
	ExprState **ri_GeneratedExprs;
	int			ri_NumGeneratedNeeded;
	Bitmapset  *updatedCols;
	MemoryContext oldContext;

	/* Nothing to do if no generated columns */
	if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
		return;

	/*
	 * In an UPDATE, we can skip computing any generated columns that do not
	 * depend on any UPDATE target column.  But if there is a BEFORE ROW
	 * UPDATE trigger, we cannot skip because the trigger might change more
	 * columns.
	 */
	if (cmdtype == CMD_UPDATE &&
		!(rel->trigdesc && rel->trigdesc->trig_update_before_row))
		updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
	else
		updatedCols = NULL;		/* NULL means "compute all generated cols" */

	/*
	 * Make sure these data structures are built in the per-query memory
	 * context so they'll survive throughout the query.
	 */
	oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* One ExprState slot per table column; non-generated ones stay NULL */
	ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
	ri_NumGeneratedNeeded = 0;

	for (int i = 0; i < natts; i++)
	{
		if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
		{
			Expr	   *expr;

			/* Fetch the GENERATED AS expression tree */
			expr = (Expr *) build_column_default(rel, i + 1);
			if (expr == NULL)
				elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
					 i + 1, RelationGetRelationName(rel));

			/*
			 * If it's an update with a known set of update target columns,
			 * see if we can skip the computation.
			 */
			if (updatedCols)
			{
				Bitmapset  *attrs_used = NULL;

				/* Collect the columns the generation expression reads */
				pull_varattnos((Node *) expr, 1, &attrs_used);

				if (!bms_overlap(updatedCols, attrs_used))
					continue;	/* need not update this column */
			}

			/* No luck, so prepare the expression for execution */
			ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
			ri_NumGeneratedNeeded++;

			/* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
			if (cmdtype == CMD_UPDATE)
				resultRelInfo->ri_extraUpdatedCols =
					bms_add_member(resultRelInfo->ri_extraUpdatedCols,
								   i + 1 - FirstLowInvalidHeapAttributeNumber);
		}
	}

	/* Save in appropriate set of fields */
	if (cmdtype == CMD_UPDATE)
	{
		/* Don't call twice */
		Assert(resultRelInfo->ri_GeneratedExprsU == NULL);

		resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
		resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
	}
	else
	{
		/* Don't call twice */
		Assert(resultRelInfo->ri_GeneratedExprsI == NULL);

		resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
		resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
	}

	MemoryContextSwitchTo(oldContext);
}
438 : :
/*
 * Compute stored generated columns for a tuple
 *
 * Evaluates the prepared generation expressions (initializing them on first
 * use via ExecInitStoredGenerated) and rebuilds "slot" in place with the
 * generated columns replaced by their computed values.  All other column
 * values are copied, then the slot is re-stored and materialized.
 */
void
ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
						   EState *estate, TupleTableSlot *slot,
						   CmdType cmdtype)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = tupdesc->natts;
	ExprContext *econtext = GetPerTupleExprContext(estate);
	ExprState **ri_GeneratedExprs;
	MemoryContext oldContext;
	Datum	   *values;
	bool	   *nulls;

	/* We should not be called unless this is true */
	Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);

	/*
	 * Initialize the expressions if we didn't already, and check whether we
	 * can exit early because nothing needs to be computed.
	 */
	if (cmdtype == CMD_UPDATE)
	{
		if (resultRelInfo->ri_GeneratedExprsU == NULL)
			ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
		if (resultRelInfo->ri_NumGeneratedNeededU == 0)
			return;
		ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
	}
	else
	{
		if (resultRelInfo->ri_GeneratedExprsI == NULL)
			ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
		/* Early exit is impossible given the prior Assert */
		Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
		ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
	}

	/* Work in per-tuple memory context; results are copied below */
	oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));

	values = palloc(sizeof(*values) * natts);
	nulls = palloc(sizeof(*nulls) * natts);

	/* Deform the whole tuple so tts_values/tts_isnull are all valid */
	slot_getallattrs(slot);
	memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);

	for (int i = 0; i < natts; i++)
	{
		Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

		if (ri_GeneratedExprs[i])
		{
			Datum		val;
			bool		isnull;

			Assert(attr->attgenerated == ATTRIBUTE_GENERATED_STORED);

			econtext->ecxt_scantuple = slot;

			val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);

			/*
			 * We must make a copy of val as we have no guarantees about where
			 * memory for a pass-by-reference Datum is located.
			 */
			if (!isnull)
				val = datumCopy(val, attr->attbyval, attr->attlen);

			values[i] = val;
			nulls[i] = isnull;
		}
		else
		{
			/* Not a column we compute; copy the existing value */
			if (!nulls[i])
				values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
		}
	}

	/* Rebuild the slot contents from the values/nulls arrays */
	ExecClearTuple(slot);
	memcpy(slot->tts_values, values, sizeof(*values) * natts);
	memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
	ExecStoreVirtualTuple(slot);
	ExecMaterializeSlot(slot);

	MemoryContextSwitchTo(oldContext);
}
528 : :
529 : : /*
530 : : * ExecInitInsertProjection
531 : : * Do one-time initialization of projection data for INSERT tuples.
532 : : *
533 : : * INSERT queries may need a projection to filter out junk attrs in the tlist.
534 : : *
535 : : * This is also a convenient place to verify that the
536 : : * output of an INSERT matches the target table.
537 : : */
538 : : static void
1104 tgl@sss.pgh.pa.us 539 : 43436 : ExecInitInsertProjection(ModifyTableState *mtstate,
540 : : ResultRelInfo *resultRelInfo)
541 : : {
542 : 43436 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
543 : 43436 : Plan *subplan = outerPlan(node);
544 : 43436 : EState *estate = mtstate->ps.state;
545 : 43436 : List *insertTargetList = NIL;
546 : 43436 : bool need_projection = false;
547 : : ListCell *l;
548 : :
549 : : /* Extract non-junk columns of the subplan's result tlist. */
550 [ + + + + : 133170 : foreach(l, subplan->targetlist)
+ + ]
551 : : {
552 : 89734 : TargetEntry *tle = (TargetEntry *) lfirst(l);
553 : :
554 [ + - ]: 89734 : if (!tle->resjunk)
555 : 89734 : insertTargetList = lappend(insertTargetList, tle);
556 : : else
1104 tgl@sss.pgh.pa.us 557 :UBC 0 : need_projection = true;
558 : : }
559 : :
560 : : /*
561 : : * The junk-free list must produce a tuple suitable for the result
562 : : * relation.
563 : : */
1104 tgl@sss.pgh.pa.us 564 :CBC 43436 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
565 : :
566 : : /* We'll need a slot matching the table's format. */
567 : 43436 : resultRelInfo->ri_newTupleSlot =
568 : 43436 : table_slot_create(resultRelInfo->ri_RelationDesc,
569 : : &estate->es_tupleTable);
570 : :
571 : : /* Build ProjectionInfo if needed (it probably isn't). */
572 [ - + ]: 43436 : if (need_projection)
573 : : {
1104 tgl@sss.pgh.pa.us 574 :UBC 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
575 : :
576 : : /* need an expression context to do the projection */
577 [ # # ]: 0 : if (mtstate->ps.ps_ExprContext == NULL)
578 : 0 : ExecAssignExprContext(estate, &mtstate->ps);
579 : :
580 : 0 : resultRelInfo->ri_projectNew =
581 : 0 : ExecBuildProjectionInfo(insertTargetList,
582 : : mtstate->ps.ps_ExprContext,
583 : : resultRelInfo->ri_newTupleSlot,
584 : : &mtstate->ps,
585 : : relDesc);
586 : : }
587 : :
1104 tgl@sss.pgh.pa.us 588 :CBC 43436 : resultRelInfo->ri_projectNewInfoValid = true;
589 : 43436 : }
590 : :
/*
 * ExecInitUpdateProjection
 *		Do one-time initialization of projection data for UPDATE tuples.
 *
 * UPDATE always needs a projection, because (1) there's always some junk
 * attrs, and (2) we may need to merge values of not-updated columns from
 * the old tuple into the final tuple.  In UPDATE, the tuple arriving from
 * the subplan contains only new values for the changed columns, plus row
 * identity info in the junk attrs.
 *
 * This is "one-time" for any given result rel, but we might touch more than
 * one result rel in the course of an inherited UPDATE, and each one needs
 * its own projection due to possible column order variation.
 *
 * This is also a convenient place to verify that the output of an UPDATE
 * matches the target table (ExecBuildUpdateProjection does that).
 */
static void
ExecInitUpdateProjection(ModifyTableState *mtstate,
						 ResultRelInfo *resultRelInfo)
{
	ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
	Plan	   *subplan = outerPlan(node);
	EState	   *estate = mtstate->ps.state;
	TupleDesc	relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
	int			whichrel;
	List	   *updateColnos;

	/*
	 * Usually, mt_lastResultIndex matches the target rel.  If it happens not
	 * to, we can get the index the hard way with an integer division.
	 */
	whichrel = mtstate->mt_lastResultIndex;
	if (resultRelInfo != mtstate->resultRelInfo + whichrel)
	{
		/* Pointer subtraction recovers the array index directly */
		whichrel = resultRelInfo - mtstate->resultRelInfo;
		Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
	}

	/* Per-result-rel list of target column numbers for this UPDATE */
	updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);

	/*
	 * For UPDATE, we use the old tuple to fill up missing values in the tuple
	 * produced by the subplan to get the new tuple.  We need two slots, both
	 * matching the table's desired format.
	 */
	resultRelInfo->ri_oldTupleSlot =
		table_slot_create(resultRelInfo->ri_RelationDesc,
						  &estate->es_tupleTable);
	resultRelInfo->ri_newTupleSlot =
		table_slot_create(resultRelInfo->ri_RelationDesc,
						  &estate->es_tupleTable);

	/* need an expression context to do the projection */
	if (mtstate->ps.ps_ExprContext == NULL)
		ExecAssignExprContext(estate, &mtstate->ps);

	resultRelInfo->ri_projectNew =
		ExecBuildUpdateProjection(subplan->targetlist,
								  false,	/* subplan did the evaluation */
								  updateColnos,
								  relDesc,
								  mtstate->ps.ps_ExprContext,
								  resultRelInfo->ri_newTupleSlot,
								  &mtstate->ps);

	resultRelInfo->ri_projectNewInfoValid = true;
}
659 : :
660 : : /*
661 : : * ExecGetInsertNewTuple
662 : : * This prepares a "new" tuple ready to be inserted into given result
663 : : * relation, by removing any junk columns of the plan's output tuple
664 : : * and (if necessary) coercing the tuple to the right tuple format.
665 : : */
666 : : static TupleTableSlot *
1110 667 : 5688666 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
668 : : TupleTableSlot *planSlot)
669 : : {
670 : 5688666 : ProjectionInfo *newProj = relinfo->ri_projectNew;
671 : : ExprContext *econtext;
672 : :
673 : : /*
674 : : * If there's no projection to be done, just make sure the slot is of the
675 : : * right type for the target rel. If the planSlot is the right type we
676 : : * can use it as-is, else copy the data into ri_newTupleSlot.
677 : : */
678 [ + - ]: 5688666 : if (newProj == NULL)
679 : : {
680 [ + + ]: 5688666 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
681 : : {
682 : 5302896 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
683 : 5302896 : return relinfo->ri_newTupleSlot;
684 : : }
685 : : else
686 : 385770 : return planSlot;
687 : : }
688 : :
689 : : /*
690 : : * Else project; since the projection output slot is ri_newTupleSlot, this
691 : : * will also fix any slot-type problem.
692 : : *
693 : : * Note: currently, this is dead code, because INSERT cases don't receive
694 : : * any junk columns so there's never a projection to be done.
695 : : */
1110 tgl@sss.pgh.pa.us 696 :UBC 0 : econtext = newProj->pi_exprContext;
697 : 0 : econtext->ecxt_outertuple = planSlot;
698 : 0 : return ExecProject(newProj);
699 : : }
700 : :
701 : : /*
702 : : * ExecGetUpdateNewTuple
703 : : * This prepares a "new" tuple by combining an UPDATE subplan's output
704 : : * tuple (which contains values of changed columns) with unchanged
705 : : * columns taken from the old tuple.
706 : : *
707 : : * The subplan tuple might also contain junk columns, which are ignored.
708 : : * Note that the projection also ensures we have a slot of the right type.
709 : : */
710 : : TupleTableSlot *
1110 tgl@sss.pgh.pa.us 711 :CBC 153434 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
712 : : TupleTableSlot *planSlot,
713 : : TupleTableSlot *oldSlot)
714 : : {
398 dean.a.rasheed@gmail 715 : 153434 : ProjectionInfo *newProj = relinfo->ri_projectNew;
716 : : ExprContext *econtext;
717 : :
718 : : /* Use a few extra Asserts to protect against outside callers */
1104 tgl@sss.pgh.pa.us 719 [ - + ]: 153434 : Assert(relinfo->ri_projectNewInfoValid);
1110 720 [ + - - + ]: 153434 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
721 [ + - - + ]: 153434 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
722 : :
723 : 153434 : econtext = newProj->pi_exprContext;
724 : 153434 : econtext->ecxt_outertuple = planSlot;
725 : 153434 : econtext->ecxt_scantuple = oldSlot;
726 : 153434 : return ExecProject(newProj);
727 : : }
728 : :
729 : : /* ----------------------------------------------------------------
730 : : * ExecInsert
731 : : *
732 : : * For INSERT, we have to insert the tuple into the target relation
733 : : * (or partition thereof) and insert appropriate tuples into the index
734 : : * relations.
735 : : *
736 : : * slot contains the new tuple value to be stored.
737 : : *
738 : : * Returns RETURNING result if any, otherwise NULL.
739 : : * *inserted_tuple is the tuple that's effectively inserted;
740 : : * *insert_destrel is the relation where it was inserted.
741 : : * These are only set on success.
742 : : *
743 : : * This may change the currently active tuple conversion map in
744 : : * mtstate->mt_transition_capture, so the callers must take care to
745 : : * save the previous value to avoid losing track of it.
746 : : * ----------------------------------------------------------------
747 : : */
static TupleTableSlot *
ExecInsert(ModifyTableContext *context,
		   ResultRelInfo *resultRelInfo,
		   TupleTableSlot *slot,
		   bool canSetTag,
		   TupleTableSlot **inserted_tuple,
		   ResultRelInfo **insert_destrel)
{
	ModifyTableState *mtstate = context->mtstate;
	EState	   *estate = context->estate;
	Relation	resultRelationDesc;
	List	   *recheckIndexes = NIL;
	TupleTableSlot *planSlot = context->planSlot;
	TupleTableSlot *result = NULL;
	TransitionCaptureState *ar_insert_trig_tcs;
	ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
	OnConflictAction onconflict = node->onConflictAction;
	PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
	MemoryContext oldContext;

	/*
	 * If the input result relation is a partitioned table, find the leaf
	 * partition to insert the tuple into.
	 */
	if (proute)
	{
		ResultRelInfo *partRelInfo;

		slot = ExecPrepareTupleRouting(mtstate, estate, proute,
									   resultRelInfo, slot,
									   &partRelInfo);
		/* From here on, work against the routed-to leaf partition. */
		resultRelInfo = partRelInfo;
	}

	ExecMaterializeSlot(slot);

	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/*
	 * Open the table's indexes, if we have not done so already, so that we
	 * can add new index entries for the inserted tuple.
	 */
	if (resultRelationDesc->rd_rel->relhasindex &&
		resultRelInfo->ri_IndexRelationDescs == NULL)
		ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);

	/*
	 * BEFORE ROW INSERT Triggers.
	 *
	 * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
	 * INSERT ... ON CONFLICT statement.  We cannot check for constraint
	 * violations before firing these triggers, because they can change the
	 * values to insert.  Also, they can run arbitrary user-defined code with
	 * side-effects that we can't cancel by just not inserting the tuple.
	 */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_before_row)
	{
		/* Flush any pending inserts, so rows are visible to the triggers */
		if (estate->es_insert_pending_result_relations != NIL)
			ExecPendingInserts(estate);

		if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
			return NULL;		/* "do nothing" */
	}

	/* INSTEAD OF ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
	{
		if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
			return NULL;		/* "do nothing" */
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		/*
		 * GENERATED expressions might reference the tableoid column, so
		 * (re-)initialize tts_tableOid before evaluating them.
		 */
		slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);

		/*
		 * Compute stored generated columns
		 */
		if (resultRelationDesc->rd_att->constr &&
			resultRelationDesc->rd_att->constr->has_generated_stored)
			ExecComputeStoredGenerated(resultRelInfo, estate, slot,
									   CMD_INSERT);

		/*
		 * If the FDW supports batching, and batching is requested, accumulate
		 * rows and insert them in batches. Otherwise use the per-row inserts.
		 */
		if (resultRelInfo->ri_BatchSize > 1)
		{
			bool		flushed = false;

			/*
			 * When we've reached the desired batch size, perform the
			 * insertion.
			 */
			if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
			{
				ExecBatchInsert(mtstate, resultRelInfo,
								resultRelInfo->ri_Slots,
								resultRelInfo->ri_PlanSlots,
								resultRelInfo->ri_NumSlots,
								estate, canSetTag);
				flushed = true;
			}

			/* Batch bookkeeping must live in per-query memory, not per-tuple. */
			oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

			if (resultRelInfo->ri_Slots == NULL)
			{
				resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
												 resultRelInfo->ri_BatchSize);
				resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
													 resultRelInfo->ri_BatchSize);
			}

			/*
			 * Initialize the batch slots. We don't know how many slots will
			 * be needed, so we initialize them as the batch grows, and we
			 * keep them across batches. To mitigate an inefficiency in how
			 * resource owner handles objects with many references (as with
			 * many slots all referencing the same tuple descriptor) we copy
			 * the appropriate tuple descriptor for each slot.
			 */
			if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
			{
				TupleDesc	tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
				TupleDesc	plan_tdesc =
					CreateTupleDescCopy(planSlot->tts_tupleDescriptor);

				resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
					MakeSingleTupleTableSlot(tdesc, slot->tts_ops);

				resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
					MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);

				/* remember how many batch slots we initialized */
				resultRelInfo->ri_NumSlotsInitialized++;
			}

			ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
						 slot);

			ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
						 planSlot);

			/*
			 * If these are the first tuples stored in the buffers, add the
			 * target rel and the mtstate to the
			 * es_insert_pending_result_relations and
			 * es_insert_pending_modifytables lists respectively, except in
			 * the case where flushing was done above, in which case they
			 * would already have been added to the lists, so no need to do
			 * this.
			 */
			if (resultRelInfo->ri_NumSlots == 0 && !flushed)
			{
				Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
										resultRelInfo));
				estate->es_insert_pending_result_relations =
					lappend(estate->es_insert_pending_result_relations,
							resultRelInfo);
				estate->es_insert_pending_modifytables =
					lappend(estate->es_insert_pending_modifytables, mtstate);
			}
			Assert(list_member_ptr(estate->es_insert_pending_result_relations,
								   resultRelInfo));

			resultRelInfo->ri_NumSlots++;

			MemoryContextSwitchTo(oldContext);

			/*
			 * The tuple is only queued at this point; it will be inserted by
			 * a later ExecBatchInsert call, so there's no RETURNING result.
			 */
			return NULL;
		}

		/*
		 * insert into foreign table: let the FDW do it
		 */
		slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
															   resultRelInfo,
															   slot,
															   planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/*
		 * AFTER ROW Triggers or RETURNING expressions might reference the
		 * tableoid column, so (re-)initialize tts_tableOid before evaluating
		 * them.  (This covers the case where the FDW replaced the slot.)
		 */
		slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
	}
	else
	{
		WCOKind		wco_kind;

		/*
		 * Constraints and GENERATED expressions might reference the tableoid
		 * column, so (re-)initialize tts_tableOid before evaluating them.
		 */
		slot->tts_tableOid = RelationGetRelid(resultRelationDesc);

		/*
		 * Compute stored generated columns
		 */
		if (resultRelationDesc->rd_att->constr &&
			resultRelationDesc->rd_att->constr->has_generated_stored)
			ExecComputeStoredGenerated(resultRelInfo, estate, slot,
									   CMD_INSERT);

		/*
		 * Check any RLS WITH CHECK policies.
		 *
		 * Normally we should check INSERT policies. But if the insert is the
		 * result of a partition key update that moved the tuple to a new
		 * partition, we should instead check UPDATE policies, because we are
		 * executing policies defined on the target table, and not those
		 * defined on the child partitions.
		 *
		 * If we're running MERGE, we refer to the action that we're executing
		 * to know if we're doing an INSERT or UPDATE to a partition table.
		 */
		if (mtstate->operation == CMD_UPDATE)
			wco_kind = WCO_RLS_UPDATE_CHECK;
		else if (mtstate->operation == CMD_MERGE)
			wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
				WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
		else
			wco_kind = WCO_RLS_INSERT_CHECK;

		/*
		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
		 * we are looking for at this point.
		 */
		if (resultRelInfo->ri_WithCheckOptions != NIL)
			ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);

		/*
		 * Check the constraints of the tuple.
		 */
		if (resultRelationDesc->rd_att->constr)
			ExecConstraints(resultRelInfo, slot, estate);

		/*
		 * Also check the tuple against the partition constraint, if there is
		 * one; except that if we got here via tuple-routing, we don't need to
		 * if there's no BR trigger defined on the partition.
		 */
		if (resultRelationDesc->rd_rel->relispartition &&
			(resultRelInfo->ri_RootResultRelInfo == NULL ||
			 (resultRelInfo->ri_TrigDesc &&
			  resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
			ExecPartitionCheck(resultRelInfo, slot, estate, true);

		if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
		{
			/* Perform a speculative insertion. */
			uint32		specToken;
			ItemPointerData conflictTid;
			bool		specConflict;
			List	   *arbiterIndexes;

			arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;

			/*
			 * Do a non-conclusive check for conflicts first.
			 *
			 * We're not holding any locks yet, so this doesn't guarantee that
			 * the later insert won't conflict.  But it avoids leaving behind
			 * a lot of canceled speculative insertions, if you run a lot of
			 * INSERT ON CONFLICT statements that do conflict.
			 *
			 * We loop back here if we find a conflict below, either during
			 * the pre-check, or when we re-check after inserting the tuple
			 * speculatively.  Better allow interrupts in case some bug makes
			 * this an infinite loop.
			 */
	vlock:
			CHECK_FOR_INTERRUPTS();
			specConflict = false;
			if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
										   &conflictTid, arbiterIndexes))
			{
				/* committed conflict tuple found */
				if (onconflict == ONCONFLICT_UPDATE)
				{
					/*
					 * In case of ON CONFLICT DO UPDATE, execute the UPDATE
					 * part.  Be prepared to retry if the UPDATE fails because
					 * of another concurrent UPDATE/DELETE to the conflict
					 * tuple.
					 */
					TupleTableSlot *returning = NULL;

					if (ExecOnConflictUpdate(context, resultRelInfo,
											 &conflictTid, slot, canSetTag,
											 &returning))
					{
						InstrCountTuples2(&mtstate->ps, 1);
						return returning;
					}
					else
						goto vlock;
				}
				else
				{
					/*
					 * In case of ON CONFLICT DO NOTHING, do nothing. However,
					 * verify that the tuple is visible to the executor's MVCC
					 * snapshot at higher isolation levels.
					 *
					 * Using ExecGetReturningSlot() to store the tuple for the
					 * recheck isn't that pretty, but we can't trivially use
					 * the input slot, because it might not be of a compatible
					 * type. As there's no conflicting usage of
					 * ExecGetReturningSlot() in the DO NOTHING case...
					 */
					Assert(onconflict == ONCONFLICT_NOTHING);
					ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
										ExecGetReturningSlot(estate, resultRelInfo));
					InstrCountTuples2(&mtstate->ps, 1);
					return NULL;
				}
			}

			/*
			 * Before we start insertion proper, acquire our "speculative
			 * insertion lock".  Others can use that to wait for us to decide
			 * if we're going to go ahead with the insertion, instead of
			 * waiting for the whole transaction to complete.
			 */
			specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());

			/* insert the tuple, with the speculative token */
			table_tuple_insert_speculative(resultRelationDesc, slot,
										   estate->es_output_cid,
										   0,
										   NULL,
										   specToken);

			/* insert index entries for tuple */
			recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
												   slot, estate, false, true,
												   &specConflict,
												   arbiterIndexes,
												   false);

			/* adjust the tuple's state accordingly */
			table_tuple_complete_speculative(resultRelationDesc, slot,
											 specToken, !specConflict);

			/*
			 * Wake up anyone waiting for our decision.  They will re-check
			 * the tuple, see that it's no longer speculative, and wait on our
			 * XID as if this was a regularly inserted tuple all along.  Or if
			 * we killed the tuple, they will see it's dead, and proceed as if
			 * the tuple never existed.
			 */
			SpeculativeInsertionLockRelease(GetCurrentTransactionId());

			/*
			 * If there was a conflict, start from the beginning.  We'll do
			 * the pre-check again, which will now find the conflicting tuple
			 * (unless it aborts before we get there).
			 */
			if (specConflict)
			{
				list_free(recheckIndexes);
				goto vlock;
			}

			/* Since there was no insertion conflict, we're done */
		}
		else
		{
			/* insert the tuple normally */
			table_tuple_insert(resultRelationDesc, slot,
							   estate->es_output_cid,
							   0, NULL);

			/* insert index entries for tuple */
			if (resultRelInfo->ri_NumIndices > 0)
				recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
													   slot, estate, false,
													   false, NULL, NIL,
													   false);
		}
	}

	/* Count the inserted row toward the command's processed-rows total. */
	if (canSetTag)
		(estate->es_processed)++;

	/*
	 * If this insert is the result of a partition key update that moved the
	 * tuple to a new partition, put this row into the transition NEW TABLE,
	 * if there is one. We need to do this separately for DELETE and INSERT
	 * because they happen on different tables.
	 */
	ar_insert_trig_tcs = mtstate->mt_transition_capture;
	if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
		&& mtstate->mt_transition_capture->tcs_update_new_table)
	{
		ExecARUpdateTriggers(estate, resultRelInfo,
							 NULL, NULL,
							 NULL,
							 NULL,
							 slot,
							 NULL,
							 mtstate->mt_transition_capture,
							 false);

		/*
		 * We've already captured the NEW TABLE row, so make sure any AR
		 * INSERT trigger fired below doesn't capture it again.
		 */
		ar_insert_trig_tcs = NULL;
	}

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
						 ar_insert_trig_tcs);

	list_free(recheckIndexes);

	/*
	 * Check any WITH CHECK OPTION constraints from parent views.  We are
	 * required to do this after testing all constraints and uniqueness
	 * violations per the SQL spec, so we do it after actually inserting the
	 * record into the heap and all indexes.
	 *
	 * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
	 * tuple will never be seen, if it violates the WITH CHECK OPTION.
	 *
	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
	 * are looking for at this point.
	 */
	if (resultRelInfo->ri_WithCheckOptions != NIL)
		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
		result = ExecProcessReturning(resultRelInfo, slot, planSlot);

	if (inserted_tuple)
		*inserted_tuple = slot;
	if (insert_destrel)
		*insert_destrel = resultRelInfo;

	return result;
}
1204 : :
1205 : : /* ----------------------------------------------------------------
1206 : : * ExecBatchInsert
1207 : : *
1208 : : * Insert multiple tuples in an efficient way.
1209 : : * Currently, this handles inserting into a foreign table without
1210 : : * RETURNING clause.
1211 : : * ----------------------------------------------------------------
1212 : : */
1213 : : static void
1180 tomas.vondra@postgre 1214 : 28 : ExecBatchInsert(ModifyTableState *mtstate,
1215 : : ResultRelInfo *resultRelInfo,
1216 : : TupleTableSlot **slots,
1217 : : TupleTableSlot **planSlots,
1218 : : int numSlots,
1219 : : EState *estate,
1220 : : bool canSetTag)
1221 : : {
1222 : : int i;
1223 : 28 : int numInserted = numSlots;
1224 : 28 : TupleTableSlot *slot = NULL;
1225 : : TupleTableSlot **rslots;
1226 : :
1227 : : /*
1228 : : * insert into foreign table: let the FDW do it
1229 : : */
1230 : 28 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1231 : : resultRelInfo,
1232 : : slots,
1233 : : planSlots,
1234 : : &numInserted);
1235 : :
1236 [ + + ]: 172 : for (i = 0; i < numInserted; i++)
1237 : : {
1238 : 144 : slot = rslots[i];
1239 : :
1240 : : /*
1241 : : * AFTER ROW Triggers might reference the tableoid column, so
1242 : : * (re-)initialize tts_tableOid before evaluating them.
1243 : : */
1244 : 144 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1245 : :
1246 : : /* AFTER ROW INSERT Triggers */
1247 : 144 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1248 : 144 : mtstate->mt_transition_capture);
1249 : :
1250 : : /*
1251 : : * Check any WITH CHECK OPTION constraints from parent views. See the
1252 : : * comment in ExecInsert.
1253 : : */
1254 [ - + ]: 144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1180 tomas.vondra@postgre 1255 :UBC 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1256 : : }
1257 : :
1180 tomas.vondra@postgre 1258 [ + - + - ]:CBC 28 : if (canSetTag && numInserted > 0)
1259 : 28 : estate->es_processed += numInserted;
1260 : :
1261 : : /* Clean up all the slots, ready for the next batch */
355 michael@paquier.xyz 1262 [ + + ]: 172 : for (i = 0; i < numSlots; i++)
1263 : : {
1264 : 144 : ExecClearTuple(slots[i]);
1265 : 144 : ExecClearTuple(planSlots[i]);
1266 : : }
1267 : 28 : resultRelInfo->ri_NumSlots = 0;
1180 tomas.vondra@postgre 1268 : 28 : }
1269 : :
1270 : : /*
1271 : : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1272 : : */
1273 : : static void
506 efujita@postgresql.o 1274 : 17 : ExecPendingInserts(EState *estate)
1275 : : {
1276 : : ListCell *l1,
1277 : : *l2;
1278 : :
493 1279 [ + - + + : 35 : forboth(l1, estate->es_insert_pending_result_relations,
+ - + + +
+ + - +
+ ]
1280 : : l2, estate->es_insert_pending_modifytables)
1281 : : {
1282 : 18 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1283 : 18 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1284 : :
506 1285 [ - + ]: 18 : Assert(mtstate);
1286 : 18 : ExecBatchInsert(mtstate, resultRelInfo,
1287 : : resultRelInfo->ri_Slots,
1288 : : resultRelInfo->ri_PlanSlots,
1289 : : resultRelInfo->ri_NumSlots,
1290 : 18 : estate, mtstate->canSetTag);
1291 : : }
1292 : :
1293 : 17 : list_free(estate->es_insert_pending_result_relations);
493 1294 : 17 : list_free(estate->es_insert_pending_modifytables);
506 1295 : 17 : estate->es_insert_pending_result_relations = NIL;
493 1296 : 17 : estate->es_insert_pending_modifytables = NIL;
506 1297 : 17 : }
1298 : :
1299 : : /*
1300 : : * ExecDeletePrologue -- subroutine for ExecDelete
1301 : : *
1302 : : * Prepare executor state for DELETE. Actually, the only thing we have to do
1303 : : * here is execute BEFORE ROW triggers. We return false if one of them makes
1304 : : * the delete a no-op; otherwise, return true.
1305 : : */
1306 : : static bool
759 alvherre@alvh.no-ip. 1307 : 820577 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1308 : : ItemPointer tupleid, HeapTuple oldtuple,
1309 : : TupleTableSlot **epqreturnslot, TM_Result *result)
1310 : : {
398 dean.a.rasheed@gmail 1311 [ + + ]: 820577 : if (result)
1312 : 673 : *result = TM_Ok;
1313 : :
1314 : : /* BEFORE ROW DELETE triggers */
759 alvherre@alvh.no-ip. 1315 [ + + ]: 820577 : if (resultRelInfo->ri_TrigDesc &&
1316 [ + + ]: 3548 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1317 : : {
1318 : : /* Flush any pending inserts, so rows are visible to the triggers */
506 efujita@postgresql.o 1319 [ + + ]: 194 : if (context->estate->es_insert_pending_result_relations != NIL)
1320 : 1 : ExecPendingInserts(context->estate);
1321 : :
759 alvherre@alvh.no-ip. 1322 : 194 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1323 : : resultRelInfo, tupleid, oldtuple,
1324 : : epqreturnslot, result, &context->tmfd);
1325 : : }
1326 : :
1327 : 820383 : return true;
1328 : : }
1329 : :
1330 : : /*
1331 : : * ExecDeleteAct -- subroutine for ExecDelete
1332 : : *
1333 : : * Actually delete the tuple from a plain table.
1334 : : *
1335 : : * Caller is in charge of doing EvalPlanQual as necessary
1336 : : */
1337 : : static TM_Result
1338 : 820483 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1339 : : ItemPointer tupleid, bool changingPart)
1340 : : {
1341 : 820483 : EState *estate = context->estate;
1342 : :
1343 : 820483 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1344 : : estate->es_output_cid,
1345 : : estate->es_snapshot,
1346 : : estate->es_crosscheck_snapshot,
1347 : : true /* wait for commit */ ,
1348 : : &context->tmfd,
1349 : : changingPart);
1350 : : }
1351 : :
1352 : : /*
1353 : : * ExecDeleteEpilogue -- subroutine for ExecDelete
1354 : : *
1355 : : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1356 : : * including the UPDATE triggers if the deletion is being done as part of a
1357 : : * cross-partition tuple move.
1358 : : */
1359 : : static void
1360 : 820453 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1361 : : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1362 : : {
1363 : 820453 : ModifyTableState *mtstate = context->mtstate;
1364 : 820453 : EState *estate = context->estate;
1365 : : TransitionCaptureState *ar_delete_trig_tcs;
1366 : :
1367 : : /*
1368 : : * If this delete is the result of a partition key update that moved the
1369 : : * tuple to a new partition, put this row into the transition OLD TABLE,
1370 : : * if there is one. We need to do this separately for DELETE and INSERT
1371 : : * because they happen on different tables.
1372 : : */
1373 : 820453 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1374 [ + + + + ]: 820453 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1375 [ + - ]: 21 : mtstate->mt_transition_capture->tcs_update_old_table)
1376 : : {
756 1377 : 21 : ExecARUpdateTriggers(estate, resultRelInfo,
1378 : : NULL, NULL,
1379 : : tupleid, oldtuple,
3 akorotkov@postgresql 1380 : 21 : NULL, NULL, mtstate->mt_transition_capture,
1381 : : false);
1382 : :
1383 : : /*
1384 : : * We've already captured the OLD TABLE row, so make sure any AR
1385 : : * DELETE trigger fired below doesn't capture it again.
1386 : : */
759 alvherre@alvh.no-ip. 1387 : 21 : ar_delete_trig_tcs = NULL;
1388 : : }
1389 : :
1390 : : /* AFTER ROW DELETE Triggers */
3 akorotkov@postgresql 1391 : 820453 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1392 : : ar_delete_trig_tcs, changingPart);
759 alvherre@alvh.no-ip. 1393 : 820453 : }
1394 : :
1395 : : /* ----------------------------------------------------------------
1396 : : * ExecDelete
1397 : : *
1398 : : * DELETE is like UPDATE, except that we delete the tuple and no
1399 : : * index modifications are needed.
1400 : : *
1401 : : * When deleting from a table, tupleid identifies the tuple to
1402 : : * delete and oldtuple is NULL. When deleting from a view,
1403 : : * oldtuple is passed to the INSTEAD OF triggers and identifies
1404 : : * what to delete, and tupleid is invalid. When deleting from a
1405 : : * foreign table, tupleid is invalid; the FDW has to figure out
1406 : : * which row to delete using data from the planSlot. oldtuple is
1407 : : * passed to foreign table triggers; it is NULL when the foreign
1408 : : * table has no relevant triggers. We use tupleDeleted to indicate
1409 : : * whether the tuple is actually deleted, callers can use it to
1410 : : * decide whether to continue the operation. When this DELETE is a
1411 : : * part of an UPDATE of partition-key, then the slot returned by
1412 : : * EvalPlanQual() is passed back using output parameter epqreturnslot.
1413 : : *
1414 : : * Returns RETURNING result if any, otherwise NULL.
1415 : : * ----------------------------------------------------------------
1416 : : */
1417 : : static TupleTableSlot *
1418 : 820389 : ExecDelete(ModifyTableContext *context,
1419 : : ResultRelInfo *resultRelInfo,
1420 : : ItemPointer tupleid,
1421 : : HeapTuple oldtuple,
1422 : : bool processReturning,
1423 : : bool changingPart,
1424 : : bool canSetTag,
1425 : : TM_Result *tmresult,
1426 : : bool *tupleDeleted,
1427 : : TupleTableSlot **epqreturnslot)
1428 : : {
1429 : 820389 : EState *estate = context->estate;
1278 heikki.linnakangas@i 1430 : 820389 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
4053 tgl@sss.pgh.pa.us 1431 : 820389 : TupleTableSlot *slot = NULL;
1432 : : TM_Result result;
1433 : :
2277 rhaas@postgresql.org 1434 [ + + ]: 820389 : if (tupleDeleted)
1435 : 485 : *tupleDeleted = false;
1436 : :
1437 : : /*
1438 : : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1439 : : * done if it says we are.
1440 : : */
759 alvherre@alvh.no-ip. 1441 [ + + ]: 820389 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1442 : : epqreturnslot, tmresult))
1443 : 26 : return NULL;
1444 : :
1445 : : /* INSTEAD OF ROW DELETE Triggers */
4935 tgl@sss.pgh.pa.us 1446 [ + + ]: 820346 : if (resultRelInfo->ri_TrigDesc &&
1447 [ + + ]: 3477 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
5300 1448 : 24 : {
1449 : : bool dodelete;
1450 : :
4935 1451 [ - + ]: 27 : Assert(oldtuple != NULL);
3675 noah@leadboat.com 1452 : 27 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1453 : :
4935 tgl@sss.pgh.pa.us 1454 [ + + ]: 27 : if (!dodelete) /* "do nothing" */
5300 1455 : 3 : return NULL;
1456 : : }
4053 1457 [ + + ]: 820319 : else if (resultRelInfo->ri_FdwRoutine)
1458 : : {
1459 : : /*
1460 : : * delete from foreign table: let the FDW do it
1461 : : *
1462 : : * We offer the returning slot as a place to store RETURNING data,
1463 : : * although the FDW can return some other slot if it wants.
1464 : : */
1874 andres@anarazel.de 1465 : 17 : slot = ExecGetReturningSlot(estate, resultRelInfo);
4053 tgl@sss.pgh.pa.us 1466 : 17 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1467 : : resultRelInfo,
1468 : : slot,
1469 : : context->planSlot);
1470 : :
1471 [ - + ]: 17 : if (slot == NULL) /* "do nothing" */
4053 tgl@sss.pgh.pa.us 1472 :UBC 0 : return NULL;
1473 : :
1474 : : /*
1475 : : * RETURNING expressions might reference the tableoid column, so
1476 : : * (re)initialize tts_tableOid before evaluating them.
1477 : : */
2008 andres@anarazel.de 1478 [ + + ]:CBC 17 : if (TTS_EMPTY(slot))
2992 rhaas@postgresql.org 1479 : 3 : ExecStoreAllNullTuple(slot);
1480 : :
1874 andres@anarazel.de 1481 : 17 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1482 : : }
1483 : : else
1484 : : {
1485 : : /*
1486 : : * delete the tuple
1487 : : *
1488 : : * Note: if context->estate->es_crosscheck_snapshot isn't
1489 : : * InvalidSnapshot, we check that the row to be deleted is visible to
1490 : : * that snapshot, and throw a can't-serialize error if not. This is a
1491 : : * special-case behavior needed for referential integrity updates in
1492 : : * transaction-snapshot mode transactions.
1493 : : */
552 john.naylor@postgres 1494 : 820302 : ldelete:
3 akorotkov@postgresql 1495 : 820304 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1496 : :
115 dean.a.rasheed@gmail 1497 [ + + ]: 820286 : if (tmresult)
1498 : 468 : *tmresult = result;
1499 : :
4935 tgl@sss.pgh.pa.us 1500 [ + + + + : 820286 : switch (result)
- ]
1501 : : {
1849 andres@anarazel.de 1502 : 15 : case TM_SelfModified:
1503 : :
1504 : : /*
1505 : : * The target tuple was already updated or deleted by the
1506 : : * current command, or by a later command in the current
1507 : : * transaction. The former case is possible in a join DELETE
1508 : : * where multiple tuples join to the same target tuple. This
1509 : : * is somewhat questionable, but Postgres has always allowed
1510 : : * it: we just ignore additional deletion attempts.
1511 : : *
1512 : : * The latter case arises if the tuple is modified by a
1513 : : * command in a BEFORE trigger, or perhaps by a command in a
1514 : : * volatile function used in the query. In such situations we
1515 : : * should not ignore the deletion, but it is equally unsafe to
1516 : : * proceed. We don't want to discard the original DELETE
1517 : : * while keeping the triggered actions based on its deletion;
1518 : : * and it would be no better to allow the original DELETE
1519 : : * while discarding updates that it triggered. The row update
1520 : : * carries some information that might be important according
1521 : : * to business rules; so throwing an error is the only safe
1522 : : * course.
1523 : : *
1524 : : * If a trigger actually intends this type of interaction, it
1525 : : * can re-execute the DELETE and then return NULL to cancel
1526 : : * the outer delete.
1527 : : */
759 alvherre@alvh.no-ip. 1528 [ + + ]: 15 : if (context->tmfd.cmax != estate->es_output_cid)
4188 kgrittn@postgresql.o 1529 [ + - ]: 3 : ereport(ERROR,
1530 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1531 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1532 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1533 : :
1534 : : /* Else, already deleted by self; nothing to do */
4935 tgl@sss.pgh.pa.us 1535 : 12 : return NULL;
1536 : :
1849 andres@anarazel.de 1537 : 820239 : case TM_Ok:
4935 tgl@sss.pgh.pa.us 1538 : 820239 : break;
1539 : :
1849 andres@anarazel.de 1540 : 29 : case TM_Updated:
1541 : : {
1542 : : TupleTableSlot *inputslot;
1543 : : TupleTableSlot *epqslot;
1544 : :
1545 [ + + ]: 29 : if (IsolationUsesXactSnapshot())
1546 [ + - ]: 1 : ereport(ERROR,
1547 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1548 : : errmsg("could not serialize access due to concurrent update")));
1549 : :
1550 : : /*
1551 : : * Already know that we're going to need to do EPQ, so
1552 : : * fetch tuple directly into the right slot.
1553 : : */
3 akorotkov@postgresql 1554 : 28 : EvalPlanQualBegin(context->epqstate);
1555 : 28 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1556 : : resultRelInfo->ri_RangeTableIndex);
1557 : :
1558 : 28 : result = table_tuple_lock(resultRelationDesc, tupleid,
1559 : : estate->es_snapshot,
1560 : : inputslot, estate->es_output_cid,
1561 : : LockTupleExclusive, LockWaitBlock,
1562 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1563 : : &context->tmfd);
1564 : :
1565 [ + + + - ]: 26 : switch (result)
1566 : : {
1567 : 23 : case TM_Ok:
1568 [ - + ]: 23 : Assert(context->tmfd.traversed);
1569 : 23 : epqslot = EvalPlanQual(context->epqstate,
1570 : : resultRelationDesc,
1571 : : resultRelInfo->ri_RangeTableIndex,
1572 : : inputslot);
1573 [ + - + + ]: 23 : if (TupIsNull(epqslot))
1574 : : /* Tuple not passing quals anymore, exiting... */
1575 : 15 : return NULL;
1576 : :
1577 : : /*
1578 : : * If requested, skip delete and pass back the
1579 : : * updated row.
1580 : : */
1581 [ + + ]: 8 : if (epqreturnslot)
1582 : : {
1583 : 6 : *epqreturnslot = epqslot;
1584 : 6 : return NULL;
1585 : : }
1586 : : else
1587 : 2 : goto ldelete;
1588 : :
1589 : 2 : case TM_SelfModified:
1590 : :
1591 : : /*
1592 : : * This can be reached when following an update
1593 : : * chain from a tuple updated by another session,
1594 : : * reaching a tuple that was already updated in
1595 : : * this transaction. If previously updated by this
1596 : : * command, ignore the delete, otherwise error
1597 : : * out.
1598 : : *
1599 : : * See also TM_SelfModified response to
1600 : : * table_tuple_delete() above.
1601 : : */
1602 [ + + ]: 2 : if (context->tmfd.cmax != estate->es_output_cid)
1603 [ + - ]: 1 : ereport(ERROR,
1604 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1605 : : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1606 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1607 : 1 : return NULL;
1608 : :
1609 : 1 : case TM_Deleted:
1610 : : /* tuple already deleted; nothing to do */
1611 : 1 : return NULL;
1612 : :
3 akorotkov@postgresql 1613 :UBC 0 : default:
1614 : :
1615 : : /*
1616 : : * TM_Invisible should be impossible because we're
1617 : : * waiting for updated row versions, and would
1618 : : * already have errored out if the first version
1619 : : * is invisible.
1620 : : *
1621 : : * TM_Updated should be impossible, because we're
1622 : : * locking the latest version via
1623 : : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1624 : : */
1625 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1626 : : result);
1627 : : return NULL;
1628 : : }
1629 : :
1630 : : Assert(false);
1631 : : break;
1632 : : }
1633 : :
1849 andres@anarazel.de 1634 :CBC 3 : case TM_Deleted:
1635 [ - + ]: 3 : if (IsolationUsesXactSnapshot())
1849 andres@anarazel.de 1636 [ # # ]:UBC 0 : ereport(ERROR,
1637 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1638 : : errmsg("could not serialize access due to concurrent delete")));
1639 : : /* tuple already deleted; nothing to do */
4935 tgl@sss.pgh.pa.us 1640 :CBC 3 : return NULL;
1641 : :
4935 tgl@sss.pgh.pa.us 1642 :UBC 0 : default:
1788 andres@anarazel.de 1643 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1644 : : result);
1645 : : return NULL;
1646 : : }
1647 : :
1648 : : /*
1649 : : * Note: Normally one would think that we have to delete index tuples
1650 : : * associated with the heap tuple now...
1651 : : *
1652 : : * ... but in POSTGRES, we have no need to do this because VACUUM will
1653 : : * take care of it later. We can't delete index tuples immediately
1654 : : * anyway, since the tuple is still visible to other transactions.
1655 : : */
1656 : : }
1657 : :
4797 tgl@sss.pgh.pa.us 1658 [ + + ]:CBC 820280 : if (canSetTag)
1659 : 819720 : (estate->es_processed)++;
1660 : :
1661 : : /* Tell caller that the delete actually happened. */
2277 rhaas@postgresql.org 1662 [ + + ]: 820280 : if (tupleDeleted)
1663 : 446 : *tupleDeleted = true;
1664 : :
3 akorotkov@postgresql 1665 : 820280 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1666 : :
1667 : : /* Process RETURNING if present and if requested */
2277 rhaas@postgresql.org 1668 [ + + + + ]: 820280 : if (processReturning && resultRelInfo->ri_projectReturning)
1669 : : {
1670 : : /*
1671 : : * We have to put the target tuple into a slot, which means first we
1672 : : * gotta fetch it. We can use the trigger tuple slot.
1673 : : */
1674 : : TupleTableSlot *rslot;
1675 : :
4053 tgl@sss.pgh.pa.us 1676 [ + + ]: 437 : if (resultRelInfo->ri_FdwRoutine)
1677 : : {
1678 : : /* FDW must have provided a slot containing the deleted row */
1679 [ + - - + ]: 3 : Assert(!TupIsNull(slot));
1680 : : }
1681 : : else
1682 : : {
1874 andres@anarazel.de 1683 : 434 : slot = ExecGetReturningSlot(estate, resultRelInfo);
4053 tgl@sss.pgh.pa.us 1684 [ + + ]: 434 : if (oldtuple != NULL)
1685 : : {
1822 andres@anarazel.de 1686 : 12 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1687 : : }
1688 : : else
1689 : : {
3 akorotkov@postgresql 1690 [ - + ]: 422 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1691 : : SnapshotAny, slot))
3 akorotkov@postgresql 1692 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1693 : : }
1694 : : }
1695 : :
759 alvherre@alvh.no-ip. 1696 :CBC 437 : rslot = ExecProcessReturning(resultRelInfo, slot, context->planSlot);
1697 : :
1698 : : /*
1699 : : * Before releasing the target tuple again, make sure rslot has a
1700 : : * local copy of any pass-by-reference values.
1701 : : */
4053 tgl@sss.pgh.pa.us 1702 : 437 : ExecMaterializeSlot(rslot);
1703 : :
5300 1704 : 437 : ExecClearTuple(slot);
1705 : :
1706 : 437 : return rslot;
1707 : : }
1708 : :
1709 : 819843 : return NULL;
1710 : : }
1711 : :
1712 : : /*
1713 : : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1714 : : *
1715 : : * This works by first deleting the old tuple from the current partition,
1716 : : * followed by inserting the new tuple into the root parent table, that is,
1717 : : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1718 : : * correct partition.
1719 : : *
1720 : : * Returns true if the tuple has been successfully moved, or if it's found
1721 : : * that the tuple was concurrently deleted so there's nothing more to do
1722 : : * for the caller.
1723 : : *
1724 : : * False is returned if the tuple we're trying to move is found to have been
1725 : : * concurrently updated. In that case, the caller must check if the updated
1726 : : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1727 : : * this function again or perform a regular update accordingly. For MERGE,
1728 : : * the updated tuple is not returned in *retry_slot; it has its own retry
1729 : : * logic.
1730 : : */
1731 : : static bool
759 alvherre@alvh.no-ip. 1732 : 506 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1733 : : ResultRelInfo *resultRelInfo,
1734 : : ItemPointer tupleid, HeapTuple oldtuple,
1735 : : TupleTableSlot *slot,
1736 : : bool canSetTag,
1737 : : UpdateContext *updateCxt,
1738 : : TM_Result *tmresult,
1739 : : TupleTableSlot **retry_slot,
1740 : : TupleTableSlot **inserted_tuple,
1741 : : ResultRelInfo **insert_destrel)
1742 : : {
1743 : 506 : ModifyTableState *mtstate = context->mtstate;
1277 heikki.linnakangas@i 1744 : 506 : EState *estate = mtstate->ps.state;
1745 : : TupleConversionMap *tupconv_map;
1746 : : bool tuple_deleted;
1747 : 506 : TupleTableSlot *epqslot = NULL;
1748 : :
759 alvherre@alvh.no-ip. 1749 : 506 : context->cpUpdateReturningSlot = NULL;
398 dean.a.rasheed@gmail 1750 : 506 : *retry_slot = NULL;
1751 : :
1752 : : /*
1753 : : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1754 : : * to migrate to a different partition. Maybe this can be implemented
1755 : : * some day, but it seems a fringe feature with little redeeming value.
1756 : : */
1277 heikki.linnakangas@i 1757 [ - + ]: 506 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1277 heikki.linnakangas@i 1758 [ # # ]:UBC 0 : ereport(ERROR,
1759 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1760 : : errmsg("invalid ON UPDATE specification"),
1761 : : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1762 : :
1763 : : /*
1764 : : * When an UPDATE is run directly on a leaf partition, simply fail with a
1765 : : * partition constraint violation error.
1766 : : */
1104 tgl@sss.pgh.pa.us 1767 [ + + ]:CBC 506 : if (resultRelInfo == mtstate->rootResultRelInfo)
1277 heikki.linnakangas@i 1768 : 21 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1769 : :
1770 : : /* Initialize tuple routing info if not already done. */
1104 tgl@sss.pgh.pa.us 1771 [ + + ]: 485 : if (mtstate->mt_partition_tuple_routing == NULL)
1772 : : {
1773 : 301 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1774 : : MemoryContext oldcxt;
1775 : :
1776 : : /* Things built here have to last for the query duration. */
1777 : 301 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1778 : :
1779 : 301 : mtstate->mt_partition_tuple_routing =
1780 : 301 : ExecSetupPartitionTupleRouting(estate, rootRel);
1781 : :
1782 : : /*
1783 : : * Before a partition's tuple can be re-routed, it must first be
1784 : : * converted to the root's format, so we'll need a slot for storing
1785 : : * such tuples.
1786 : : */
1787 [ - + ]: 301 : Assert(mtstate->mt_root_tuple_slot == NULL);
1788 : 301 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1789 : :
1790 : 301 : MemoryContextSwitchTo(oldcxt);
1791 : : }
1792 : :
1793 : : /*
1794 : : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1795 : : * We want to return rows from INSERT.
1796 : : */
759 alvherre@alvh.no-ip. 1797 : 485 : ExecDelete(context, resultRelInfo,
1798 : : tupleid, oldtuple,
1799 : : false, /* processReturning */
1800 : : true, /* changingPart */
1801 : : false, /* canSetTag */
1802 : : tmresult, &tuple_deleted, &epqslot);
1803 : :
1804 : : /*
1805 : : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
1806 : : * it was already deleted by self, or it was concurrently deleted by
1807 : : * another transaction), then we should skip the insert as well;
1808 : : * otherwise, an UPDATE could cause an increase in the total number of
1809 : : * rows across all partitions, which is clearly wrong.
1810 : : *
1811 : : * For a normal UPDATE, the case where the tuple has been the subject of a
1812 : : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1813 : : * machinery, but for an UPDATE that we've translated into a DELETE from
1814 : : * this partition and an INSERT into some other partition, that's not
1815 : : * available, because CTID chains can't span relation boundaries. We
1816 : : * mimic the semantics to a limited extent by skipping the INSERT if the
1817 : : * DELETE fails to find a tuple. This ensures that two concurrent
1818 : : * attempts to UPDATE the same tuple at the same time can't turn one tuple
1819 : : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1820 : : * it.
1821 : : */
1277 heikki.linnakangas@i 1822 [ + + ]: 484 : if (!tuple_deleted)
1823 : : {
1824 : : /*
1825 : : * epqslot will be typically NULL. But when ExecDelete() finds that
1826 : : * another transaction has concurrently updated the same row, it
1827 : : * re-fetches the row, skips the delete, and epqslot is set to the
1828 : : * re-fetched tuple slot. In that case, we need to do all the checks
1829 : : * again. For MERGE, we leave everything to the caller (it must do
1830 : : * additional rechecking, and might end up executing a different
1831 : : * action entirely).
1832 : : */
28 dean.a.rasheed@gmail 1833 [ + + ]:GNC 38 : if (mtstate->operation == CMD_MERGE)
115 dean.a.rasheed@gmail 1834 :CBC 17 : return *tmresult == TM_Ok;
398 1835 [ + + - + ]: 21 : else if (TupIsNull(epqslot))
1277 heikki.linnakangas@i 1836 : 18 : return true;
1837 : : else
1838 : : {
1839 : : /* Fetch the most recent version of old tuple. */
1840 : : TupleTableSlot *oldSlot;
1841 : :
1842 : : /* ... but first, make sure ri_oldTupleSlot is initialized. */
3 akorotkov@postgresql 1843 [ - + ]: 3 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3 akorotkov@postgresql 1844 :UBC 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
3 akorotkov@postgresql 1845 :CBC 3 : oldSlot = resultRelInfo->ri_oldTupleSlot;
1846 [ - + ]: 3 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
1847 : : tupleid,
1848 : : SnapshotAny,
1849 : : oldSlot))
3 akorotkov@postgresql 1850 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
1851 : : /* and project the new tuple to retry the UPDATE with */
398 dean.a.rasheed@gmail 1852 :CBC 3 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
1853 : : oldSlot);
1277 heikki.linnakangas@i 1854 : 3 : return false;
1855 : : }
1856 : : }
1857 : :
1858 : : /*
1859 : : * resultRelInfo is one of the per-relation resultRelInfos. So we should
1860 : : * convert the tuple into root's tuple descriptor if needed, since
1861 : : * ExecInsert() starts the search from root.
1862 : : */
1104 tgl@sss.pgh.pa.us 1863 : 446 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1277 heikki.linnakangas@i 1864 [ + + ]: 446 : if (tupconv_map != NULL)
1865 : 151 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1866 : : slot,
1867 : : mtstate->mt_root_tuple_slot);
1868 : :
1869 : : /* Tuple routing starts from the root table. */
759 alvherre@alvh.no-ip. 1870 : 382 : context->cpUpdateReturningSlot =
756 1871 : 446 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
1872 : : inserted_tuple, insert_destrel);
1873 : :
1874 : : /*
1875 : : * Reset the transition state that may possibly have been written by
1876 : : * INSERT.
1877 : : */
1277 heikki.linnakangas@i 1878 [ + + ]: 382 : if (mtstate->mt_transition_capture)
1879 : 21 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
1880 : :
1881 : : /* We're done moving. */
1882 : 382 : return true;
1883 : : }
1884 : :
1885 : : /*
1886 : : * ExecUpdatePrologue -- subroutine for ExecUpdate
1887 : : *
1888 : : * Prepare executor state for UPDATE. This includes running BEFORE ROW
1889 : : * triggers. We return false if one of them makes the update a no-op;
1890 : : * otherwise, return true.
1891 : : */
1892 : : static bool
759 alvherre@alvh.no-ip. 1893 : 156989 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1894 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1895 : : TM_Result *result)
1896 : : {
1897 : 156989 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1898 : :
398 dean.a.rasheed@gmail 1899 [ + + ]: 156989 : if (result)
1900 : 1053 : *result = TM_Ok;
1901 : :
759 alvherre@alvh.no-ip. 1902 : 156989 : ExecMaterializeSlot(slot);
1903 : :
1904 : : /*
1905 : : * Open the table's indexes, if we have not done so already, so that we
1906 : : * can add new index entries for the updated tuple.
1907 : : */
1908 [ + + ]: 156989 : if (resultRelationDesc->rd_rel->relhasindex &&
1909 [ + + ]: 111758 : resultRelInfo->ri_IndexRelationDescs == NULL)
1910 : 4237 : ExecOpenIndices(resultRelInfo, false);
1911 : :
1912 : : /* BEFORE ROW UPDATE triggers */
1913 [ + + ]: 156989 : if (resultRelInfo->ri_TrigDesc &&
1914 [ + + ]: 3076 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
1915 : : {
1916 : : /* Flush any pending inserts, so rows are visible to the triggers */
506 efujita@postgresql.o 1917 [ + + ]: 1286 : if (context->estate->es_insert_pending_result_relations != NIL)
1918 : 1 : ExecPendingInserts(context->estate);
1919 : :
759 alvherre@alvh.no-ip. 1920 : 1286 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
1921 : : resultRelInfo, tupleid, oldtuple, slot,
1922 : : result, &context->tmfd);
1923 : : }
1924 : :
1925 : 155703 : return true;
1926 : : }
1927 : :
1928 : : /*
1929 : : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
1930 : : *
1931 : : * Apply the final modifications to the tuple slot before the update.
1932 : : * (This is split out because we also need it in the foreign-table code path.)
1933 : : */
1934 : : static void
1935 : 156852 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
1936 : : TupleTableSlot *slot,
1937 : : EState *estate)
1938 : : {
1939 : 156852 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1940 : :
1941 : : /*
1942 : : * Constraints and GENERATED expressions might reference the tableoid
1943 : : * column, so (re-)initialize tts_tableOid before evaluating them.
1944 : : */
1945 : 156852 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1946 : :
1947 : : /*
1948 : : * Compute stored generated columns
1949 : : */
1950 [ + + ]: 156852 : if (resultRelationDesc->rd_att->constr &&
1951 [ + + ]: 93331 : resultRelationDesc->rd_att->constr->has_generated_stored)
1952 : 130 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1953 : : CMD_UPDATE);
1954 : 156852 : }
1955 : :
1956 : : /*
1957 : : * ExecUpdateAct -- subroutine for ExecUpdate
1958 : : *
1959 : : * Actually update the tuple, when operating on a plain table. If the
1960 : : * table is a partition, and the command was called referencing an ancestor
1961 : : * partitioned table, this routine migrates the resulting tuple to another
1962 : : * partition.
1963 : : *
1964 : : * The caller is in charge of keeping indexes current as necessary. The
1965 : : * caller is also in charge of doing EvalPlanQual if the tuple is found to
1966 : : * be concurrently updated. However, in case of a cross-partition update,
1967 : : * this routine does it.
1968 : : */
1969 : : static TM_Result
1970 : 156778 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1971 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1972 : : bool canSetTag, UpdateContext *updateCxt)
1973 : : {
1974 : 156778 : EState *estate = context->estate;
1975 : 156778 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1976 : : bool partition_constraint_failed;
1977 : : TM_Result result;
1978 : :
1979 : 156778 : updateCxt->crossPartUpdate = false;
1980 : :
1981 : : /*
1982 : : * If we move the tuple to a new partition, we loop back here to recompute
1983 : : * GENERATED values (which are allowed to be different across partitions)
1984 : : * and recheck any RLS policies and constraints. We do not fire any
1985 : : * BEFORE triggers of the new partition, however.
1986 : : */
552 john.naylor@postgres 1987 : 156781 : lreplace:
1988 : : /* Fill in GENERATEd columns */
405 tgl@sss.pgh.pa.us 1989 : 156781 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
1990 : :
1991 : : /* ensure slot is independent, consider e.g. EPQ */
759 alvherre@alvh.no-ip. 1992 : 156781 : ExecMaterializeSlot(slot);
1993 : :
1994 : : /*
1995 : : * If partition constraint fails, this row might get moved to another
1996 : : * partition, in which case we should check the RLS CHECK policy just
1997 : : * before inserting into the new partition, rather than doing it here.
1998 : : * This is because a trigger on that partition might again change the row.
1999 : : * So skip the WCO checks if the partition constraint fails.
2000 : : */
2001 : 156781 : partition_constraint_failed =
2002 [ + + ]: 158092 : resultRelationDesc->rd_rel->relispartition &&
2003 [ + + ]: 1311 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2004 : :
2005 : : /* Check any RLS UPDATE WITH CHECK policies */
2006 [ + + ]: 156781 : if (!partition_constraint_failed &&
2007 [ + + ]: 156275 : resultRelInfo->ri_WithCheckOptions != NIL)
2008 : : {
2009 : : /*
2010 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2011 : : * we are looking for at this point.
2012 : : */
2013 : 237 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2014 : : resultRelInfo, slot, estate);
2015 : : }
2016 : :
2017 : : /*
2018 : : * If a partition check failed, try to move the row into the right
2019 : : * partition.
2020 : : */
2021 [ + + ]: 156754 : if (partition_constraint_failed)
2022 : : {
2023 : : TupleTableSlot *inserted_tuple,
2024 : : *retry_slot;
756 2025 : 506 : ResultRelInfo *insert_destrel = NULL;
2026 : :
2027 : : /*
2028 : : * ExecCrossPartitionUpdate will first DELETE the row from the
2029 : : * partition it's currently in and then insert it back into the root
2030 : : * table, which will re-route it to the correct partition. However,
2031 : : * if the tuple has been concurrently updated, a retry is needed.
2032 : : */
759 2033 [ + + ]: 506 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2034 : : tupleid, oldtuple, slot,
2035 : : canSetTag, updateCxt,
2036 : : &result,
2037 : : &retry_slot,
2038 : : &inserted_tuple,
2039 : : &insert_destrel))
2040 : : {
2041 : : /* success! */
2042 : 412 : updateCxt->crossPartUpdate = true;
2043 : :
2044 : : /*
2045 : : * If the partitioned table being updated is referenced in foreign
2046 : : * keys, queue up trigger events to check that none of them were
2047 : : * violated. No special treatment is needed in
2048 : : * non-cross-partition update situations, because the leaf
2049 : : * partition's AR update triggers will take care of that. During
2050 : : * cross-partition updates implemented as delete on the source
2051 : : * partition followed by insert on the destination partition,
2052 : : * AR-UPDATE triggers of the root table (that is, the table
2053 : : * mentioned in the query) must be fired.
2054 : : *
2055 : : * NULL insert_destrel means that the move failed to occur, that
2056 : : * is, the update failed, so no need to anything in that case.
2057 : : */
756 2058 [ + + ]: 412 : if (insert_destrel &&
2059 [ + + ]: 368 : resultRelInfo->ri_TrigDesc &&
2060 [ + + ]: 169 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2061 : 138 : ExecCrossPartitionUpdateForeignKey(context,
2062 : : resultRelInfo,
2063 : : insert_destrel,
2064 : : tupleid, slot,
2065 : : inserted_tuple);
2066 : :
759 2067 : 414 : return TM_Ok;
2068 : : }
2069 : :
2070 : : /*
2071 : : * No luck, a retry is needed. If running MERGE, we do not do so
2072 : : * here; instead let it handle that on its own rules.
2073 : : */
28 dean.a.rasheed@gmail 2074 [ + + ]:GNC 8 : if (context->mtstate->operation == CMD_MERGE)
115 dean.a.rasheed@gmail 2075 :CBC 5 : return result;
2076 : :
2077 : : /*
2078 : : * ExecCrossPartitionUpdate installed an updated version of the new
2079 : : * tuple in the retry slot; start over.
2080 : : */
398 2081 : 3 : slot = retry_slot;
759 alvherre@alvh.no-ip. 2082 : 3 : goto lreplace;
2083 : : }
2084 : :
2085 : : /*
2086 : : * Check the constraints of the tuple. We've already checked the
2087 : : * partition constraint above; however, we must still ensure the tuple
2088 : : * passes all other constraints, so we will call ExecConstraints() and
2089 : : * have it validate all remaining checks.
2090 : : */
2091 [ + + ]: 156248 : if (resultRelationDesc->rd_att->constr)
2092 : 93057 : ExecConstraints(resultRelInfo, slot, estate);
2093 : :
2094 : : /*
2095 : : * replace the heap tuple
2096 : : *
2097 : : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2098 : : * the row to be updated is visible to that snapshot, and throw a
2099 : : * can't-serialize error if not. This is a special-case behavior needed
2100 : : * for referential integrity updates in transaction-snapshot mode
2101 : : * transactions.
2102 : : */
2103 : 156223 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2104 : : estate->es_output_cid,
2105 : : estate->es_snapshot,
2106 : : estate->es_crosscheck_snapshot,
2107 : : true /* wait for commit */ ,
2108 : : &context->tmfd, &updateCxt->lockmode,
2109 : : &updateCxt->updateIndexes);
2110 : :
2111 : 156211 : return result;
2112 : : }
2113 : :
2114 : : /*
2115 : : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2116 : : *
2117 : : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2118 : : * returns indicating that the tuple was updated.
2119 : : */
2120 : : static void
2121 : 156211 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2122 : : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2123 : : HeapTuple oldtuple, TupleTableSlot *slot)
2124 : : {
2125 : 156211 : ModifyTableState *mtstate = context->mtstate;
398 dean.a.rasheed@gmail 2126 : 156211 : List *recheckIndexes = NIL;
2127 : :
2128 : : /* insert index entries for tuple if necessary */
391 tomas.vondra@postgre 2129 [ + + + + ]: 156211 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
759 alvherre@alvh.no-ip. 2130 : 84571 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2131 : : slot, context->estate,
2132 : : true, false,
2133 : : NULL, NIL,
391 tomas.vondra@postgre 2134 : 84571 : (updateCxt->updateIndexes == TU_Summarizing));
2135 : :
2136 : : /* AFTER ROW UPDATE Triggers */
759 alvherre@alvh.no-ip. 2137 : 156199 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2138 : : NULL, NULL,
2139 : : tupleid, oldtuple, slot,
2140 : : recheckIndexes,
2141 [ + + ]: 156199 : mtstate->operation == CMD_INSERT ?
2142 : : mtstate->mt_oc_transition_capture :
2143 : : mtstate->mt_transition_capture,
2144 : : false);
2145 : :
398 dean.a.rasheed@gmail 2146 : 156199 : list_free(recheckIndexes);
2147 : :
2148 : : /*
2149 : : * Check any WITH CHECK OPTION constraints from parent views. We are
2150 : : * required to do this after testing all constraints and uniqueness
2151 : : * violations per the SQL spec, so we do it after actually updating the
2152 : : * record in the heap and all indexes.
2153 : : *
2154 : : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2155 : : * are looking for at this point.
2156 : : */
759 alvherre@alvh.no-ip. 2157 [ + + ]: 156199 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2158 : 224 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2159 : : slot, context->estate);
2160 : 156161 : }
2161 : :
2162 : : /*
2163 : : * Queues up an update event using the target root partitioned table's
2164 : : * trigger to check that a cross-partition update hasn't broken any foreign
2165 : : * keys pointing into it.
2166 : : */
2167 : : static void
756 2168 : 138 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2169 : : ResultRelInfo *sourcePartInfo,
2170 : : ResultRelInfo *destPartInfo,
2171 : : ItemPointer tupleid,
2172 : : TupleTableSlot *oldslot,
2173 : : TupleTableSlot *newslot)
2174 : : {
2175 : : ListCell *lc;
2176 : : ResultRelInfo *rootRelInfo;
2177 : : List *ancestorRels;
2178 : :
2179 : 138 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2180 : 138 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2181 : :
2182 : : /*
2183 : : * For any foreign keys that point directly into a non-root ancestors of
2184 : : * the source partition, we can in theory fire an update event to enforce
2185 : : * those constraints using their triggers, if we could tell that both the
2186 : : * source and the destination partitions are under the same ancestor. But
2187 : : * for now, we simply report an error that those cannot be enforced.
2188 : : */
2189 [ + - + + : 303 : foreach(lc, ancestorRels)
+ + ]
2190 : : {
2191 : 168 : ResultRelInfo *rInfo = lfirst(lc);
2192 : 168 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2193 : 168 : bool has_noncloned_fkey = false;
2194 : :
2195 : : /* Root ancestor's triggers will be processed. */
2196 [ + + ]: 168 : if (rInfo == rootRelInfo)
2197 : 135 : continue;
2198 : :
2199 [ + - + - ]: 33 : if (trigdesc && trigdesc->trig_update_after_row)
2200 : : {
2201 [ + + ]: 114 : for (int i = 0; i < trigdesc->numtriggers; i++)
2202 : : {
2203 : 84 : Trigger *trig = &trigdesc->triggers[i];
2204 : :
2205 [ + + + - ]: 87 : if (!trig->tgisclone &&
2206 : 3 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2207 : : {
2208 : 3 : has_noncloned_fkey = true;
2209 : 3 : break;
2210 : : }
2211 : : }
2212 : : }
2213 : :
2214 [ + + ]: 33 : if (has_noncloned_fkey)
2215 [ + - ]: 3 : ereport(ERROR,
2216 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2217 : : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2218 : : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2219 : : RelationGetRelationName(rInfo->ri_RelationDesc),
2220 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2221 : : errhint("Consider defining the foreign key on table \"%s\".",
2222 : : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2223 : : }
2224 : :
2225 : : /* Perform the root table's triggers. */
2226 : 135 : ExecARUpdateTriggers(context->estate,
2227 : : rootRelInfo, sourcePartInfo, destPartInfo,
2228 : : tupleid, NULL, newslot, NIL, NULL, true);
2229 : 135 : }
2230 : :
2231 : : /* ----------------------------------------------------------------
2232 : : * ExecUpdate
2233 : : *
2234 : : * note: we can't run UPDATE queries with transactions
2235 : : * off because UPDATEs are actually INSERTs and our
2236 : : * scan will mistakenly loop forever, updating the tuple
2237 : : * it just inserted.. This should be fixed but until it
2238 : : * is, we don't want to get stuck in an infinite loop
2239 : : * which corrupts your database..
2240 : : *
2241 : : * When updating a table, tupleid identifies the tuple to
2242 : : * update and oldtuple is NULL. When updating a view, oldtuple
2243 : : * is passed to the INSTEAD OF triggers and identifies what to
2244 : : * update, and tupleid is invalid. When updating a foreign table,
2245 : : * tupleid is invalid; the FDW has to figure out which row to
2246 : : * update using data from the planSlot. oldtuple is passed to
2247 : : * foreign table triggers; it is NULL when the foreign table has
2248 : : * no relevant triggers.
2249 : : *
2250 : : * slot contains the new tuple value to be stored.
2251 : : * planSlot is the output of the ModifyTable's subplan; we use it
2252 : : * to access values from other input tables (for RETURNING),
2253 : : * row-ID junk columns, etc.
2254 : : *
2255 : : * Returns RETURNING result if any, otherwise NULL.
 : : * On a cross-partition update, the RETURNING tuple (if any) has already
 : : * been projected inside ExecUpdateAct(), so we return that slot directly
 : : * (context->cpUpdateReturningSlot) without running the epilogue here.
2256 : : * ----------------------------------------------------------------
2257 : : */
2258 : : static TupleTableSlot *
 759 2259 : 155936 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2260 : : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2261 : : bool canSetTag)
2262 : : {
2263 : 155936 : EState *estate = context->estate;
 1278 heikki.linnakangas@i 2264 : 155936 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
 759 alvherre@alvh.no-ip. 2265 : 155936 : UpdateContext updateCxt = {0};
2266 : : TM_Result result;
2267 : :
2268 : : /*
2269 : : * abort the operation if not running transactions
2270 : : */
 5300 tgl@sss.pgh.pa.us 2271 [ - + ]: 155936 : if (IsBootstrapProcessingMode())
 5300 tgl@sss.pgh.pa.us 2272 [ # # ]:UBC 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2273 : :
2274 : : /*
2275 : : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2276 : : * done if it says we are.
2277 : : */
 398 dean.a.rasheed@gmail 2278 [ + + ]:CBC 155936 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
 759 alvherre@alvh.no-ip. 2279 : 69 : return NULL;
2280 : :
2281 : : /* INSTEAD OF ROW UPDATE Triggers */
 4935 tgl@sss.pgh.pa.us 2282 [ + + ]: 155849 : if (resultRelInfo->ri_TrigDesc &&
2283 [ + + ]: 2813 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2284 : : {
 1874 andres@anarazel.de 2285 [ + + ]: 57 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2286 : : oldtuple, slot))
 1789 tgl@sss.pgh.pa.us 2287 : 9 : return NULL; /* "do nothing" */
2288 : : }
 4053 2289 [ + + ]: 155792 : else if (resultRelInfo->ri_FdwRoutine)
2290 : : {
2291 : : /* Fill in GENERATEd columns */
 759 alvherre@alvh.no-ip. 2292 : 71 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2293 : :
2294 : : /*
2295 : : * update in foreign table: let the FDW do it
2296 : : */
 4053 tgl@sss.pgh.pa.us 2297 : 71 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2298 : : resultRelInfo,
2299 : : slot,
2300 : : context->planSlot);
2301 : :
2302 [ + + ]: 71 : if (slot == NULL) /* "do nothing" */
2303 : 1 : return NULL;
2304 : :
2305 : : /*
2306 : : * AFTER ROW Triggers or RETURNING expressions might reference the
2307 : : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2308 : : * them. (This covers the case where the FDW replaced the slot.)
2309 : : */
 1874 andres@anarazel.de 2310 : 70 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2311 : : }
2312 : : else
2313 : : {
2314 : : /*
2315 : : * If we generate a new candidate tuple after EvalPlanQual testing, we
2316 : : * must loop back here to try again. (We don't need to redo triggers,
2317 : : * however. If there are any BEFORE triggers then trigger.c will have
2318 : : * done table_tuple_lock to lock the correct tuple, so there's no need
2319 : : * to do them again.)
2320 : : */
 759 alvherre@alvh.no-ip. 2321 : 155721 : redo_act:
2322 : 155773 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2323 : : canSetTag, &updateCxt);
2324 : :
2325 : : /*
2326 : : * If ExecUpdateAct reports that a cross-partition update was done,
2327 : : * then the RETURNING tuple (if any) has been projected and there's
2328 : : * nothing else for us to do.
2329 : : */
2330 [ + + ]: 155630 : if (updateCxt.crossPartUpdate)
2331 : 342 : return context->cpUpdateReturningSlot;
2332 : :
 4935 tgl@sss.pgh.pa.us 2333 [ + + + + : 155288 : switch (result)
- ]
2334 : : {
 1849 andres@anarazel.de 2335 : 42 : case TM_SelfModified:
2336 : :
2337 : : /*
2338 : : * The target tuple was already updated or deleted by the
2339 : : * current command, or by a later command in the current
2340 : : * transaction. The former case is possible in a join UPDATE
2341 : : * where multiple tuples join to the same target tuple. This
2342 : : * is pretty questionable, but Postgres has always allowed it:
2343 : : * we just execute the first update action and ignore
2344 : : * additional update attempts.
2345 : : *
2346 : : * The latter case arises if the tuple is modified by a
2347 : : * command in a BEFORE trigger, or perhaps by a command in a
2348 : : * volatile function used in the query. In such situations we
2349 : : * should not ignore the update, but it is equally unsafe to
2350 : : * proceed. We don't want to discard the original UPDATE
2351 : : * while keeping the triggered actions based on it; and we
2352 : : * have no principled way to merge this update with the
2353 : : * previous ones. So throwing an error is the only safe
2354 : : * course.
2355 : : *
2356 : : * If a trigger actually intends this type of interaction, it
2357 : : * can re-execute the UPDATE (assuming it can figure out how)
2358 : : * and then return NULL to cancel the outer update.
2359 : : */
 759 alvherre@alvh.no-ip. 2360 [ + + ]: 42 : if (context->tmfd.cmax != estate->es_output_cid)
 4188 kgrittn@postgresql.o 2361 [ + - ]: 3 : ereport(ERROR,
2362 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2363 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2364 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2365 : :
2366 : : /* Else, already updated by self; nothing to do */
 4935 tgl@sss.pgh.pa.us 2367 : 39 : return NULL;
2368 : :
 1849 andres@anarazel.de 2369 : 155164 : case TM_Ok:
 4935 tgl@sss.pgh.pa.us 2370 : 155164 : break;
2371 : :
 1849 andres@anarazel.de 2372 : 78 : case TM_Updated:
2373 : : {
2374 : : TupleTableSlot *inputslot;
2375 : : TupleTableSlot *epqslot;
2376 : : TupleTableSlot *oldSlot;
2377 : :
2378 [ + + ]: 78 : if (IsolationUsesXactSnapshot())
2379 [ + - ]: 2 : ereport(ERROR,
2380 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2381 : : errmsg("could not serialize access due to concurrent update")));
2382 : :
2383 : : /*
2384 : : * Already know that we're going to need to do EPQ, so
2385 : : * fetch tuple directly into the right slot.
2386 : : */
 3 akorotkov@postgresql 2387 : 76 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2388 : : resultRelInfo->ri_RangeTableIndex);
2389 : :
2390 : 76 : result = table_tuple_lock(resultRelationDesc, tupleid,
2391 : : estate->es_snapshot,
2392 : : inputslot, estate->es_output_cid,
2393 : : updateCxt.lockmode, LockWaitBlock,
2394 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2395 : : &context->tmfd);
2396 : :
2397 [ + + + - ]: 74 : switch (result)
2398 : : {
2399 : 69 : case TM_Ok:
2400 [ - + ]: 69 : Assert(context->tmfd.traversed);
2401 : :
2402 : 69 : epqslot = EvalPlanQual(context->epqstate,
2403 : : resultRelationDesc,
2404 : : resultRelInfo->ri_RangeTableIndex,
2405 : : inputslot);
2406 [ + + + + ]: 69 : if (TupIsNull(epqslot))
2407 : : /* Tuple not passing quals anymore, exiting... */
2408 : 17 : return NULL;
2409 : :
2410 : : /* Make sure ri_oldTupleSlot is initialized. */
2411 [ - + ]: 52 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
 3 akorotkov@postgresql 2412 :UBC 0 : ExecInitUpdateProjection(context->mtstate,
2413 : : resultRelInfo);
2414 : :
2415 : : /* Fetch the most recent version of old tuple. */
 3 akorotkov@postgresql 2416 :CBC 52 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2417 [ - + ]: 52 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2418 : : tupleid,
2419 : : SnapshotAny,
2420 : : oldSlot))
 3 akorotkov@postgresql 2421 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
 3 akorotkov@postgresql 2422 :CBC 52 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2423 : : epqslot, oldSlot);
2424 : 52 : goto redo_act;
2425 : :
2426 : 1 : case TM_Deleted:
2427 : : /* tuple already deleted; nothing to do */
2428 : 1 : return NULL;
2429 : :
2430 : 4 : case TM_SelfModified:
2431 : :
2432 : : /*
2433 : : * This can be reached when following an update
2434 : : * chain from a tuple updated by another session,
2435 : : * reaching a tuple that was already updated in
2436 : : * this transaction. If previously modified by
2437 : : * this command, ignore the redundant update,
2438 : : * otherwise error out.
2439 : : *
2440 : : * See also TM_SelfModified response to
2441 : : * table_tuple_update() above.
2442 : : */
2443 [ + + ]: 4 : if (context->tmfd.cmax != estate->es_output_cid)
2444 [ + - ]: 1 : ereport(ERROR,
2445 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2446 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2447 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2448 : 3 : return NULL;
2449 : :
 3 akorotkov@postgresql 2450 :UBC 0 : default:
2451 : : /* see table_tuple_lock call in ExecDelete() */
2452 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2453 : : result);
2454 : : return NULL;
2455 : : }
2456 : : }
2457 : :
2458 : : break;
2459 : :
 1849 andres@anarazel.de 2460 :CBC 4 : case TM_Deleted:
2461 [ - + ]: 4 : if (IsolationUsesXactSnapshot())
 1849 andres@anarazel.de 2462 [ # # ]:UBC 0 : ereport(ERROR,
2463 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2464 : : errmsg("could not serialize access due to concurrent delete")));
2465 : : /* tuple already deleted; nothing to do */
 4935 tgl@sss.pgh.pa.us 2466 :CBC 4 : return NULL;
2467 : :
 4935 tgl@sss.pgh.pa.us 2468 :UBC 0 : default:
 1788 andres@anarazel.de 2469 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2470 : : result);
2471 : : return NULL;
2472 : : }
2473 : : }
2474 : :
 : : /* Update succeeded: count the row in es_processed when this node is
 : : * responsible for the command completion tag (canSetTag). */
 4797 tgl@sss.pgh.pa.us 2475 [ + + ]:CBC 155279 : if (canSetTag)
2476 : 154983 : (estate->es_processed)++;
2477 : :
 759 alvherre@alvh.no-ip. 2478 : 155279 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2479 : : slot);
2480 : :
2481 : : /* Process RETURNING if present */
 5300 tgl@sss.pgh.pa.us 2482 [ + + ]: 155235 : if (resultRelInfo->ri_projectReturning)
 759 alvherre@alvh.no-ip. 2483 : 1080 : return ExecProcessReturning(resultRelInfo, slot, context->planSlot);
2484 : :
 5300 tgl@sss.pgh.pa.us 2485 : 154155 : return NULL;
2486 : : }
2487 : :
2488 : : /*
2489 : : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2490 : : *
2491 : : * Try to lock tuple for update as part of speculative insertion. If
2492 : : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2493 : : * (but still lock row, even though it may not satisfy estate's
2494 : : * snapshot).
2495 : : *
2496 : : * Returns true if we're done (with or without an update), or false if
2497 : : * the caller must retry the INSERT from scratch.
 : : * (A false return means a concurrent update or delete invalidated the
 : : * conflicting tuple; the new row version might not conflict anymore, so
 : : * the whole speculative-insertion dance has to be redone.)
2498 : : */
2499 : : static bool
 759 alvherre@alvh.no-ip. 2500 : 2600 : ExecOnConflictUpdate(ModifyTableContext *context,
2501 : : ResultRelInfo *resultRelInfo,
2502 : : ItemPointer conflictTid,
2503 : : TupleTableSlot *excludedSlot,
2504 : : bool canSetTag,
2505 : : TupleTableSlot **returning)
2506 : : {
2507 : 2600 : ModifyTableState *mtstate = context->mtstate;
 3264 andres@anarazel.de 2508 : 2600 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2509 : 2600 : Relation relation = resultRelInfo->ri_RelationDesc;
 2211 alvherre@alvh.no-ip. 2510 : 2600 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
 1866 andres@anarazel.de 2511 : 2600 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2512 : : TM_FailureData tmfd;
2513 : : LockTupleMode lockmode;
2514 : : TM_Result test;
2515 : : Datum xminDatum;
2516 : : TransactionId xmin;
2517 : : bool isnull;
2518 : :
2519 : : /* Determine lock mode to use */
 759 alvherre@alvh.no-ip. 2520 : 2600 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2521 : :
2522 : : /*
2523 : : * Lock tuple for update. Don't follow updates when tuple cannot be
2524 : : * locked without doing so. A row locking conflict here means our
2525 : : * previous conclusion that the tuple is conclusively committed is not
2526 : : * true anymore.
2527 : : */
 1788 andres@anarazel.de 2528 : 2600 : test = table_tuple_lock(relation, conflictTid,
 759 alvherre@alvh.no-ip. 2529 : 2600 : context->estate->es_snapshot,
2530 : 2600 : existing, context->estate->es_output_cid,
2531 : : lockmode, LockWaitBlock, 0,
2532 : : &tmfd);
 3264 andres@anarazel.de 2533 [ + + - - : 2600 : switch (test)
- - ]
2534 : : {
 1849 2535 : 2588 : case TM_Ok:
2536 : : /* success! */
 3264 2537 : 2588 : break;
2538 : :
 1849 2539 : 12 : case TM_Invisible:
2540 : :
2541 : : /*
2542 : : * This can occur when a just inserted tuple is updated again in
2543 : : * the same command. E.g. because multiple rows with the same
2544 : : * conflicting key values are inserted.
2545 : : *
2546 : : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2547 : : * case. We do not want to proceed because it would lead to the
2548 : : * same row being updated a second time in some unspecified order,
2549 : : * and in contrast to plain UPDATEs there's no historical behavior
2550 : : * to break.
2551 : : *
2552 : : * It is the user's responsibility to prevent this situation from
2553 : : * occurring. These problems are why the SQL standard similarly
2554 : : * specifies that for SQL MERGE, an exception must be raised in
2555 : : * the event of an attempt to update the same row twice.
2556 : : */
2557 : 12 : xminDatum = slot_getsysattr(existing,
2558 : : MinTransactionIdAttributeNumber,
2559 : : &isnull);
2560 [ - + ]: 12 : Assert(!isnull);
2561 : 12 : xmin = DatumGetTransactionId(xminDatum);
2562 : :
2563 [ + - ]: 12 : if (TransactionIdIsCurrentTransactionId(xmin))
 3264 2564 [ + - ]: 12 : ereport(ERROR,
2565 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2566 : : /* translator: %s is a SQL command name */
2567 : : errmsg("%s command cannot affect row a second time",
2568 : : "ON CONFLICT DO UPDATE"),
2569 : : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2570 : :
2571 : : /* This shouldn't happen */
 3264 andres@anarazel.de 2572 [ # # ]:UBC 0 : elog(ERROR, "attempted to lock invisible tuple");
2573 : : break;
2574 : :
 1849 2575 : 0 : case TM_SelfModified:
2576 : :
2577 : : /*
2578 : : * This state should never be reached. As a dirty snapshot is used
2579 : : * to find conflicting tuples, speculative insertion wouldn't have
2580 : : * seen this row to conflict with.
2581 : : */
 3264 2582 [ # # ]: 0 : elog(ERROR, "unexpected self-updated tuple");
2583 : : break;
2584 : :
 1849 2585 : 0 : case TM_Updated:
 3264 2586 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2587 [ # # ]: 0 : ereport(ERROR,
2588 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2589 : : errmsg("could not serialize access due to concurrent update")));
2590 : :
2591 : : /*
2592 : : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2593 : : * a partitioned table we shouldn't reach to a case where tuple to
2594 : : * be lock is moved to another partition due to concurrent update
2595 : : * of the partition key.
2596 : : */
 1849 2597 [ # # ]: 0 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2598 : :
2599 : : /*
2600 : : * Tell caller to try again from the very start.
2601 : : *
2602 : : * It does not make sense to use the usual EvalPlanQual() style
2603 : : * loop here, as the new version of the row might not conflict
2604 : : * anymore, or the conflicting tuple has actually been deleted.
2605 : : */
2606 : 0 : ExecClearTuple(existing);
2607 : 0 : return false;
2608 : :
2609 : 0 : case TM_Deleted:
2610 [ # # ]: 0 : if (IsolationUsesXactSnapshot())
2611 [ # # ]: 0 : ereport(ERROR,
2612 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2613 : : errmsg("could not serialize access due to concurrent delete")));
2614 : :
2615 : : /* see TM_Updated case */
2616 [ # # ]: 0 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2617 : 0 : ExecClearTuple(existing);
 3264 2618 : 0 : return false;
2619 : :
2620 : 0 : default:
 1788 2621 [ # # ]: 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2622 : : }
2623 : :
2624 : : /* Success, the tuple is locked. */
2625 : :
2626 : : /*
2627 : : * Verify that the tuple is visible to our MVCC snapshot if the current
2628 : : * isolation level mandates that.
2629 : : *
2630 : : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2631 : : * CONFLICT ... WHERE clause may prevent us from reaching that.
2632 : : *
2633 : : * This means we only ever continue when a new command in the current
2634 : : * transaction could see the row, even though in READ COMMITTED mode the
2635 : : * tuple will not be visible according to the current statement's
2636 : : * snapshot. This is in line with the way UPDATE deals with newer tuple
2637 : : * versions.
2638 : : */
 759 alvherre@alvh.no-ip. 2639 :CBC 2588 : ExecCheckTupleVisible(context->estate, relation, existing);
2640 : :
2641 : : /*
2642 : : * Make tuple and any needed join variables available to ExecQual and
2643 : : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2644 : : * the target's existing tuple is installed in the scantuple. EXCLUDED
2645 : : * has been made to reference INNER_VAR in setrefs.c, but there is no
2646 : : * other redirection.
2647 : : */
 1866 andres@anarazel.de 2648 : 2588 : econtext->ecxt_scantuple = existing;
 3264 2649 : 2588 : econtext->ecxt_innertuple = excludedSlot;
2650 : 2588 : econtext->ecxt_outertuple = NULL;
2651 : :
2588 2652 [ + + ]: 2588 : if (!ExecQual(onConflictSetWhere, econtext))
2653 : : {
 1866 2654 : 16 : ExecClearTuple(existing); /* see return below */
 3264 2655 [ - + ]: 16 : InstrCountFiltered1(&mtstate->ps, 1);
2656 : 16 : return true; /* done with the tuple */
2657 : : }
2658 : :
2659 [ + + ]: 2572 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2660 : : {
2661 : : /*
2662 : : * Check target's existing tuple against UPDATE-applicable USING
2663 : : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2664 : : *
2665 : : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2666 : : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2667 : : * but that's almost the extent of its special handling for ON
2668 : : * CONFLICT DO UPDATE.
2669 : : *
2670 : : * The rewriter will also have associated UPDATE applicable straight
2671 : : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2672 : : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2673 : : * kinds, so there is no danger of spurious over-enforcement in the
2674 : : * INSERT or UPDATE path.
2675 : : */
2676 : 30 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2677 : : existing,
2678 : : mtstate->ps.state);
2679 : : }
2680 : :
2681 : : /* Project the new tuple version */
 2211 alvherre@alvh.no-ip. 2682 : 2560 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2683 : :
2684 : : /*
2685 : : * Note that it is possible that the target tuple has been modified in
2686 : : * this session, after the above table_tuple_lock. We choose to not error
2687 : : * out in that case, in line with ExecUpdate's treatment of similar cases.
2688 : : * This can happen if an UPDATE is triggered from within ExecQual(),
2689 : : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2690 : : * wCTE in the ON CONFLICT's SET.
2691 : : */
2692 : :
2693 : : /* Execute UPDATE with projection */
 759 2694 : 5105 : *returning = ExecUpdate(context, resultRelInfo,
2695 : : conflictTid, NULL,
 1866 andres@anarazel.de 2696 : 2560 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2697 : : canSetTag);
2698 : :
2699 : : /*
2700 : : * Clear out existing tuple, as there might not be another conflict among
2701 : : * the next input rows. Don't want to hold resources till the end of the
2702 : : * query.
2703 : : */
 1866 2704 : 2545 : ExecClearTuple(existing);
 3264 2705 : 2545 : return true;
2706 : : }
2707 : :
2708 : : /*
2709 : : * Perform MERGE.
 : : *
 : : * tupleid or oldtuple identifies the candidate target row when the source
 : : * row found a match (table vs. view target, respectively); both are NULL
 : : * for a NOT MATCHED [BY TARGET] source row.
 : : *
 : : * Returns the RETURNING result slot of the executed action, if any,
 : : * otherwise NULL.
2710 : : */
2711 : : static TupleTableSlot *
 748 alvherre@alvh.no-ip. 2712 : 3506 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2713 : : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2714 : : {
 28 dean.a.rasheed@gmail 2715 :GNC 3506 : TupleTableSlot *rslot = NULL;
2716 : : bool matched;
2717 : :
2718 : : /*-----
2719 : : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2720 : : * valid, depending on whether the result relation is a table or a view.
2721 : : * We execute the first action for which the additional WHEN MATCHED AND
2722 : : * quals pass. If an action without quals is found, that action is
2723 : : * executed.
2724 : : *
2725 : : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2726 : : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2727 : : * in sequence until one passes. This is almost identical to the WHEN
2728 : : * MATCHED case, and both cases are handled by ExecMergeMatched().
2729 : : *
2730 : : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2731 : : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2732 : : * TARGET] actions in sequence until one passes.
2733 : : *
2734 : : * Things get interesting in case of concurrent update/delete of the
2735 : : * target tuple. Such concurrent update/delete is detected while we are
2736 : : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2737 : : *
2738 : : * A concurrent update can:
2739 : : *
2740 : : * 1. modify the target tuple so that the results from checking any
2741 : : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2742 : : * SOURCE actions potentially change, but the result from the join
2743 : : * quals does not change.
2744 : : *
2745 : : * In this case, we are still dealing with the same kind of match
2746 : : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2747 : : * actions from the start and choose the first one that satisfies the
2748 : : * new target tuple.
2749 : : *
2750 : : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2751 : : * quals no longer pass and hence the source and target tuples no
2752 : : * longer match.
2753 : : *
2754 : : * In this case, we are now dealing with a NOT MATCHED case, and we
2755 : : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2756 : : * TARGET] actions. First ExecMergeMatched() processes the list of
2757 : : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2758 : : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2759 : : * TARGET] actions in sequence until one passes. Thus we may execute
2760 : : * two actions; one of each kind.
2761 : : *
2762 : : * Thus we support concurrent updates that turn MATCHED candidate rows
2763 : : * into NOT MATCHED rows. However, we do not attempt to support cases
2764 : : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2765 : : * cause a target row to match a different source row.
2766 : : *
2767 : : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2768 : : * [BY TARGET].
2769 : : *
2770 : : * ExecMergeMatched() takes care of following the update chain and
2771 : : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
2772 : : * action, as long as the target tuple still exists. If the target tuple
2773 : : * gets deleted or a concurrent update causes the join quals to fail, it
2774 : : * returns a matched status of false and we call ExecMergeNotMatched().
2775 : : * Given that ExecMergeMatched() always makes progress by following the
2776 : : * update chain and we never switch from ExecMergeNotMatched() to
2777 : : * ExecMergeMatched(), there is no risk of a livelock.
2778 : : */
 45 2779 [ + + + + ]: 3506 : matched = tupleid != NULL || oldtuple != NULL;
 748 alvherre@alvh.no-ip. 2780 [ + + ]:CBC 3506 : if (matched)
 28 dean.a.rasheed@gmail 2781 :GNC 2196 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
2782 : : canSetTag, &matched);
2783 : :
2784 : : /*
2785 : : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
2786 : : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
2787 : : * "matched" to false, indicating that it no longer matches).
2788 : : */
 748 alvherre@alvh.no-ip. 2789 [ + + ]:CBC 3461 : if (!matched)
2790 : : {
2791 : : /*
2792 : : * If a concurrent update turned a MATCHED case into a NOT MATCHED
2793 : : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
2794 : : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
2795 : : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
2796 : : * SOURCE action, and computed the row to return. If so, we cannot
2797 : : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
2798 : : * pending (to be processed on the next call to ExecModifyTable()).
2799 : : * Otherwise, just process the action now.
2800 : : */
 15 dean.a.rasheed@gmail 2801 [ + + ]:GNC 1318 : if (rslot == NULL)
2802 : 1317 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2803 : : else
2804 : 1 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
2805 : : }
2806 : :
 28 2807 : 3434 : return rslot;
2808 : : }
2809 : :
2810 : : /*
2811 : : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
2812 : : * action, depending on whether the join quals are satisfied. If the target
2813 : : * relation is a table, the current target tuple is identified by tupleid.
2814 : : * Otherwise, if the target relation is a view, oldtuple is the current target
2815 : : * tuple from the view.
2816 : : *
2817 : : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
2818 : : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
2819 : : * action do not pass, we check the second, then the third and so on. If we
2820 : : * reach the end without finding a qualifying action, we return NULL.
2821 : : * Otherwise, we execute the qualifying action and return its RETURNING
2822 : : * result, if any, or NULL.
2823 : : *
2824 : : * On entry, "*matched" is assumed to be true. If a concurrent update or
2825 : : * delete is detected that causes the join quals to no longer pass, we set it
2826 : : * to false, indicating that the caller should process any NOT MATCHED [BY
2827 : : * TARGET] actions.
2828 : : *
2829 : : * After a concurrent update, we restart from the first action to look for a
2830 : : * new qualifying action to execute. If the join quals originally passed, and
2831 : : * the concurrent update caused them to no longer pass, then we switch from
2832 : : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
2833 : : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
2834 : : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
2835 : : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
2836 : : */
2837 : : static TupleTableSlot *
748 alvherre@alvh.no-ip. 2838 :CBC 2196 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2839 : : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
2840 : : bool *matched)
2841 : : {
2842 : 2196 : ModifyTableState *mtstate = context->mtstate;
15 dean.a.rasheed@gmail 2843 :GNC 2196 : List **mergeActions = resultRelInfo->ri_MergeActions;
2844 : : List *actionStates;
28 2845 : 2196 : TupleTableSlot *newslot = NULL;
2846 : 2196 : TupleTableSlot *rslot = NULL;
748 alvherre@alvh.no-ip. 2847 :CBC 2196 : EState *estate = context->estate;
2848 : 2196 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2849 : : bool isNull;
2850 : 2196 : EPQState *epqstate = &mtstate->mt_epqstate;
2851 : : ListCell *l;
2852 : :
2853 : : /* Expect matched to be true on entry */
15 dean.a.rasheed@gmail 2854 [ - + ]:GNC 2196 : Assert(*matched);
2855 : :
2856 : : /*
2857 : : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
2858 : : * are done.
2859 : : */
2860 [ + + ]: 2196 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
2861 [ + + ]: 600 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
28 2862 : 264 : return NULL;
2863 : :
2864 : : /*
2865 : : * Make tuple and any needed join variables available to ExecQual and
2866 : : * ExecProject. The target's existing tuple is installed in the scantuple.
2867 : : * This target relation's slot is required only in the case of a MATCHED
2868 : : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
2869 : : */
748 alvherre@alvh.no-ip. 2870 :CBC 1932 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
2871 : 1932 : econtext->ecxt_innertuple = context->planSlot;
2872 : 1932 : econtext->ecxt_outertuple = NULL;
2873 : :
2874 : : /*
2875 : : * This routine is only invoked for matched target rows, so we should
2876 : : * either have the tupleid of the target row, or an old tuple from the
2877 : : * target wholerow junk attr.
2878 : : */
45 dean.a.rasheed@gmail 2879 [ + + - + ]:GNC 1932 : Assert(tupleid != NULL || oldtuple != NULL);
2880 [ + + ]: 1932 : if (oldtuple != NULL)
2881 : 42 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
2882 : : false);
15 2883 [ - + ]: 1890 : else if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2884 : : tupleid,
2885 : : SnapshotAny,
2886 : : resultRelInfo->ri_oldTupleSlot))
15 dean.a.rasheed@gmail 2887 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch the target tuple");
2888 : :
2889 : : /*
2890 : : * Test the join condition. If it's satisfied, perform a MATCHED action.
2891 : : * Otherwise, perform a NOT MATCHED BY SOURCE action.
2892 : : *
2893 : : * Note that this join condition will be NULL if there are no NOT MATCHED
2894 : : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
2895 : : * need only consider MATCHED actions here.
2896 : : */
15 dean.a.rasheed@gmail 2897 [ + + ]:GNC 1932 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
2898 : 1847 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
2899 : : else
2900 : 85 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
2901 : :
2902 : 1932 : lmerge_matched:
2903 : :
2904 [ + + + + : 2696 : foreach(l, actionStates)
+ + ]
2905 : : {
748 alvherre@alvh.no-ip. 2906 :CBC 1995 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
2907 : 1995 : CmdType commandType = relaction->mas_action->commandType;
2908 : : TM_Result result;
2909 : 1995 : UpdateContext updateCxt = {0};
2910 : :
2911 : : /*
2912 : : * Test condition, if any.
2913 : : *
2914 : : * In the absence of any condition, we perform the action
2915 : : * unconditionally (no need to check separately since ExecQual() will
2916 : : * return true if there are no conditions to evaluate).
2917 : : */
2918 [ + + ]: 1995 : if (!ExecQual(relaction->mas_whenqual, econtext))
2919 : 733 : continue;
2920 : :
2921 : : /*
2922 : : * Check if the existing target tuple meets the USING checks of
2923 : : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
2924 : : * error.
2925 : : *
2926 : : * The WITH CHECK quals for UPDATE RLS policies are applied in
2927 : : * ExecUpdateAct() and hence we need not do anything special to handle
2928 : : * them.
2929 : : *
2930 : : * NOTE: We must do this after WHEN quals are evaluated, so that we
2931 : : * check policies only when they matter.
2932 : : */
251 dean.a.rasheed@gmail 2933 [ + + + + ]: 1262 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
2934 : : {
748 alvherre@alvh.no-ip. 2935 : 45 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
2936 : : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
2937 : : resultRelInfo,
2938 : : resultRelInfo->ri_oldTupleSlot,
2939 [ + + ]: 45 : context->mtstate->ps.state);
2940 : : }
2941 : :
2942 : : /* Perform stated action */
2943 [ + + + - ]: 1250 : switch (commandType)
2944 : : {
2945 : 1053 : case CMD_UPDATE:
2946 : :
2947 : : /*
2948 : : * Project the output tuple, and use that to update the table.
2949 : : * We don't need to filter out junk attributes, because the
2950 : : * UPDATE action's targetlist doesn't have any.
2951 : : */
2952 : 1053 : newslot = ExecProject(relaction->mas_proj);
2953 : :
28 dean.a.rasheed@gmail 2954 :GNC 1053 : mtstate->mt_merge_action = relaction;
748 alvherre@alvh.no-ip. 2955 [ + + ]:CBC 1053 : if (!ExecUpdatePrologue(context, resultRelInfo,
2956 : : tupleid, NULL, newslot, &result))
2957 : : {
398 dean.a.rasheed@gmail 2958 [ + + ]: 9 : if (result == TM_Ok)
28 dean.a.rasheed@gmail 2959 :GNC 78 : return NULL; /* "do nothing" */
2960 : :
398 dean.a.rasheed@gmail 2961 :CBC 6 : break; /* concurrent update/delete */
2962 : : }
2963 : :
2964 : : /* INSTEAD OF ROW UPDATE Triggers */
45 dean.a.rasheed@gmail 2965 [ + + ]:GNC 1044 : if (resultRelInfo->ri_TrigDesc &&
2966 [ + + ]: 167 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2967 : : {
2968 [ - + ]: 39 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2969 : : oldtuple, newslot))
28 dean.a.rasheed@gmail 2970 :UNC 0 : return NULL; /* "do nothing" */
2971 : : }
2972 : : else
2973 : : {
45 dean.a.rasheed@gmail 2974 :GNC 1005 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
2975 : : NULL, newslot, canSetTag,
2976 : : &updateCxt);
2977 : :
2978 : : /*
2979 : : * As in ExecUpdate(), if ExecUpdateAct() reports that a
2980 : : * cross-partition update was done, then there's nothing
2981 : : * else for us to do --- the UPDATE has been turned into a
2982 : : * DELETE and an INSERT, and we must not perform any of
2983 : : * the usual post-update tasks. Also, the RETURNING tuple
2984 : : * (if any) has been projected, so we can just return
2985 : : * that.
2986 : : */
2987 [ + + ]: 995 : if (updateCxt.crossPartUpdate)
2988 : : {
2989 : 67 : mtstate->mt_merge_updated += 1;
28 2990 : 67 : return context->cpUpdateReturningSlot;
2991 : : }
2992 : : }
2993 : :
45 2994 [ + + ]: 967 : if (result == TM_Ok)
2995 : : {
748 alvherre@alvh.no-ip. 2996 :CBC 932 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
2997 : : tupleid, NULL, newslot);
2998 : 926 : mtstate->mt_merge_updated += 1;
2999 : : }
3000 : 961 : break;
3001 : :
3002 : 188 : case CMD_DELETE:
28 dean.a.rasheed@gmail 3003 :GNC 188 : mtstate->mt_merge_action = relaction;
748 alvherre@alvh.no-ip. 3004 [ + + ]:CBC 188 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3005 : : NULL, NULL, &result))
3006 : : {
398 dean.a.rasheed@gmail 3007 [ + + ]: 6 : if (result == TM_Ok)
28 dean.a.rasheed@gmail 3008 :GNC 3 : return NULL; /* "do nothing" */
3009 : :
398 dean.a.rasheed@gmail 3010 :CBC 3 : break; /* concurrent update/delete */
3011 : : }
3012 : :
3013 : : /* INSTEAD OF ROW DELETE Triggers */
45 dean.a.rasheed@gmail 3014 [ + + ]:GNC 182 : if (resultRelInfo->ri_TrigDesc &&
3015 [ + + ]: 22 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3016 : : {
3017 [ - + ]: 3 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3018 : : oldtuple))
28 dean.a.rasheed@gmail 3019 :UNC 0 : return NULL; /* "do nothing" */
3020 : : }
3021 : : else
45 dean.a.rasheed@gmail 3022 :GNC 179 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3023 : : false);
3024 : :
748 alvherre@alvh.no-ip. 3025 [ + + ]:CBC 182 : if (result == TM_Ok)
3026 : : {
3027 : 173 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3028 : : false);
3029 : 173 : mtstate->mt_merge_deleted += 1;
3030 : : }
3031 : 182 : break;
3032 : :
3033 : 9 : case CMD_NOTHING:
3034 : : /* Doing nothing is always OK */
3035 : 9 : result = TM_Ok;
3036 : 9 : break;
3037 : :
748 alvherre@alvh.no-ip. 3038 :UBC 0 : default:
15 dean.a.rasheed@gmail 3039 [ # # ]:UNC 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3040 : : }
3041 : :
748 alvherre@alvh.no-ip. 3042 [ + + + + :CBC 1161 : switch (result)
- - ]
3043 : : {
3044 : 1108 : case TM_Ok:
3045 : : /* all good; perform final actions */
514 3046 [ + + + + ]: 1108 : if (canSetTag && commandType != CMD_NOTHING)
748 3047 : 1090 : (estate->es_processed)++;
3048 : :
3049 : 1108 : break;
3050 : :
3051 : 16 : case TM_SelfModified:
3052 : :
3053 : : /*
3054 : : * The target tuple was already updated or deleted by the
3055 : : * current command, or by a later command in the current
3056 : : * transaction. The former case is explicitly disallowed by
3057 : : * the SQL standard for MERGE, which insists that the MERGE
3058 : : * join condition should not join a target row to more than
3059 : : * one source row.
3060 : : *
3061 : : * The latter case arises if the tuple is modified by a
3062 : : * command in a BEFORE trigger, or perhaps by a command in a
3063 : : * volatile function used in the query. In such situations we
3064 : : * should not ignore the MERGE action, but it is equally
3065 : : * unsafe to proceed. We don't want to discard the original
3066 : : * MERGE action while keeping the triggered actions based on
3067 : : * it; and it would be no better to allow the original MERGE
3068 : : * action while discarding the updates that it triggered. So
3069 : : * throwing an error is the only safe course.
3070 : : */
38 dean.a.rasheed@gmail 3071 [ + + ]: 16 : if (context->tmfd.cmax != estate->es_output_cid)
3072 [ + - ]: 6 : ereport(ERROR,
3073 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3074 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3075 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3076 : :
748 alvherre@alvh.no-ip. 3077 [ + - ]: 10 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3078 [ + - ]: 10 : ereport(ERROR,
3079 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3080 : : /* translator: %s is a SQL command name */
3081 : : errmsg("%s command cannot affect row a second time",
3082 : : "MERGE"),
3083 : : errhint("Ensure that not more than one source row matches any one target row.")));
3084 : :
3085 : : /* This shouldn't happen */
748 alvherre@alvh.no-ip. 3086 [ # # ]:UBC 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3087 : : break;
3088 : :
748 alvherre@alvh.no-ip. 3089 :CBC 5 : case TM_Deleted:
3090 [ - + ]: 5 : if (IsolationUsesXactSnapshot())
748 alvherre@alvh.no-ip. 3091 [ # # ]:UBC 0 : ereport(ERROR,
3092 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3093 : : errmsg("could not serialize access due to concurrent delete")));
3094 : :
3095 : : /*
3096 : : * If the tuple was already deleted, set matched to false to
3097 : : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3098 : : */
28 dean.a.rasheed@gmail 3099 :GNC 5 : *matched = false;
3100 : 5 : return NULL;
3101 : :
748 alvherre@alvh.no-ip. 3102 :CBC 32 : case TM_Updated:
3103 : : {
3104 : : bool was_matched;
3105 : : Relation resultRelationDesc;
3106 : : TupleTableSlot *epqslot,
3107 : : *inputslot;
3108 : : LockTupleMode lockmode;
3109 : :
3110 : : /*
3111 : : * The target tuple was concurrently updated by some other
3112 : : * transaction. If we are currently processing a MATCHED
3113 : : * action, use EvalPlanQual() with the new version of the
3114 : : * tuple and recheck the join qual, to detect a change
3115 : : * from the MATCHED to the NOT MATCHED cases. If we are
3116 : : * already processing a NOT MATCHED BY SOURCE action, we
3117 : : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3118 : : * MATCHED).
3119 : : */
15 dean.a.rasheed@gmail 3120 :GNC 32 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
748 alvherre@alvh.no-ip. 3121 :CBC 32 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3122 : 32 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3123 : :
15 dean.a.rasheed@gmail 3124 [ + - ]:GNC 32 : if (was_matched)
3125 : 32 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3126 : : resultRelInfo->ri_RangeTableIndex);
3127 : : else
15 dean.a.rasheed@gmail 3128 :UNC 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3129 : :
748 alvherre@alvh.no-ip. 3130 :CBC 32 : result = table_tuple_lock(resultRelationDesc, tupleid,
3131 : : estate->es_snapshot,
3132 : : inputslot, estate->es_output_cid,
3133 : : lockmode, LockWaitBlock,
3134 : : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3135 : : &context->tmfd);
3136 [ + - + - ]: 32 : switch (result)
3137 : : {
3138 : 31 : case TM_Ok:
3139 : :
3140 : : /*
3141 : : * If the tuple was updated and migrated to
3142 : : * another partition concurrently, the current
3143 : : * MERGE implementation can't follow. There's
3144 : : * probably a better way to handle this case, but
3145 : : * it'd require recognizing the relation to which
3146 : : * the tuple moved, and setting our current
3147 : : * resultRelInfo to that.
3148 : : */
3149 [ - + ]: 31 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
748 alvherre@alvh.no-ip. 3150 [ # # ]:UBC 0 : ereport(ERROR,
3151 : : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3152 : : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3153 : :
3154 : : /*
3155 : : * If this was a MATCHED case, use EvalPlanQual()
3156 : : * to recheck the join condition.
3157 : : */
15 dean.a.rasheed@gmail 3158 [ + - ]:GNC 31 : if (was_matched)
3159 : : {
3160 : 31 : epqslot = EvalPlanQual(epqstate,
3161 : : resultRelationDesc,
3162 : : resultRelInfo->ri_RangeTableIndex,
3163 : : inputslot);
3164 : :
3165 : : /*
3166 : : * If the subplan didn't return a tuple, then
3167 : : * we must be dealing with an inner join for
3168 : : * which the join condition no longer matches.
3169 : : * This can only happen if there are no NOT
3170 : : * MATCHED actions, and so there is nothing
3171 : : * more to do.
3172 : : */
3173 [ + - - + ]: 31 : if (TupIsNull(epqslot))
15 dean.a.rasheed@gmail 3174 :UNC 0 : return NULL;
3175 : :
3176 : : /*
3177 : : * If we got a NULL ctid from the subplan, the
3178 : : * join quals no longer pass and we switch to
3179 : : * the NOT MATCHED BY SOURCE case.
3180 : : */
15 dean.a.rasheed@gmail 3181 :GNC 31 : (void) ExecGetJunkAttribute(epqslot,
3182 : 31 : resultRelInfo->ri_RowIdAttNo,
3183 : : &isNull);
3184 [ + + ]: 31 : if (isNull)
3185 : 2 : *matched = false;
3186 : :
3187 : : /*
3188 : : * Otherwise, recheck the join quals to see if
3189 : : * we need to switch to the NOT MATCHED BY
3190 : : * SOURCE case.
3191 : : */
3192 [ - + ]: 31 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3193 : : &context->tmfd.ctid,
3194 : : SnapshotAny,
3195 : : resultRelInfo->ri_oldTupleSlot))
15 dean.a.rasheed@gmail 3196 [ # # ]:UNC 0 : elog(ERROR, "failed to fetch the target tuple");
3197 : :
15 dean.a.rasheed@gmail 3198 [ + + ]:GNC 31 : if (*matched)
3199 : 29 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3200 : : econtext);
3201 : :
3202 : : /* Switch lists, if necessary */
3203 [ + + ]: 31 : if (!*matched)
3204 : 3 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3205 : : }
3206 : :
3207 : : /*
3208 : : * Loop back and process the MATCHED or NOT
3209 : : * MATCHED BY SOURCE actions from the start.
3210 : : */
748 alvherre@alvh.no-ip. 3211 :CBC 31 : goto lmerge_matched;
3212 : :
748 alvherre@alvh.no-ip. 3213 :UBC 0 : case TM_Deleted:
3214 : :
3215 : : /*
3216 : : * tuple already deleted; tell caller to run NOT
3217 : : * MATCHED [BY TARGET] actions
3218 : : */
28 dean.a.rasheed@gmail 3219 :UNC 0 : *matched = false;
3220 : 0 : return NULL;
3221 : :
748 alvherre@alvh.no-ip. 3222 :CBC 1 : case TM_SelfModified:
3223 : :
3224 : : /*
3225 : : * This can be reached when following an update
3226 : : * chain from a tuple updated by another session,
3227 : : * reaching a tuple that was already updated or
3228 : : * deleted by the current command, or by a later
3229 : : * command in the current transaction. As above,
3230 : : * this should always be treated as an error.
3231 : : */
3232 [ - + ]: 1 : if (context->tmfd.cmax != estate->es_output_cid)
748 alvherre@alvh.no-ip. 3233 [ # # ]:UBC 0 : ereport(ERROR,
3234 : : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3235 : : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3236 : : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3237 : :
38 dean.a.rasheed@gmail 3238 [ + - ]:CBC 1 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3239 [ + - ]: 1 : ereport(ERROR,
3240 : : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3241 : : /* translator: %s is a SQL command name */
3242 : : errmsg("%s command cannot affect row a second time",
3243 : : "MERGE"),
3244 : : errhint("Ensure that not more than one source row matches any one target row.")));
3245 : :
3246 : : /* This shouldn't happen */
38 dean.a.rasheed@gmail 3247 [ # # ]:UBC 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3248 : : return NULL;
3249 : :
748 alvherre@alvh.no-ip. 3250 : 0 : default:
3251 : : /* see table_tuple_lock call in ExecDelete() */
3252 [ # # ]: 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3253 : : result);
3254 : : return NULL;
3255 : : }
3256 : : }
3257 : :
3258 : 0 : case TM_Invisible:
3259 : : case TM_WouldBlock:
3260 : : case TM_BeingModified:
3261 : : /* these should not occur */
3262 [ # # ]: 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3263 : : break;
3264 : : }
3265 : :
3266 : : /* Process RETURNING if present */
28 dean.a.rasheed@gmail 3267 [ + + ]:GNC 1108 : if (resultRelInfo->ri_projectReturning)
3268 : : {
3269 [ + + - - ]: 120 : switch (commandType)
3270 : : {
3271 : 72 : case CMD_UPDATE:
3272 : 72 : rslot = ExecProcessReturning(resultRelInfo, newslot,
3273 : : context->planSlot);
3274 : 72 : break;
3275 : :
3276 : 48 : case CMD_DELETE:
3277 : 48 : rslot = ExecProcessReturning(resultRelInfo,
3278 : : resultRelInfo->ri_oldTupleSlot,
3279 : : context->planSlot);
3280 : 48 : break;
3281 : :
28 dean.a.rasheed@gmail 3282 :UNC 0 : case CMD_NOTHING:
3283 : 0 : break;
3284 : :
3285 : 0 : default:
3286 [ # # ]: 0 : elog(ERROR, "unrecognized commandType: %d",
3287 : : (int) commandType);
3288 : : }
3289 : : }
3290 : :
3291 : : /*
3292 : : * We've activated one of the WHEN clauses, so we don't search
3293 : : * further. This is required behaviour, not an optimization.
3294 : : */
748 alvherre@alvh.no-ip. 3295 :CBC 1108 : break;
3296 : : }
3297 : :
3298 : : /*
3299 : : * Successfully executed an action or no qualifying action was found.
3300 : : */
28 dean.a.rasheed@gmail 3301 :GNC 1809 : return rslot;
3302 : : }
3303 : :
3304 : : /*
3305 : : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3306 : : */
3307 : : static TupleTableSlot *
748 alvherre@alvh.no-ip. 3308 :CBC 1318 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3309 : : bool canSetTag)
3310 : : {
3311 : 1318 : ModifyTableState *mtstate = context->mtstate;
3312 : 1318 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3313 : : List *actionStates;
28 dean.a.rasheed@gmail 3314 :GNC 1318 : TupleTableSlot *rslot = NULL;
3315 : : ListCell *l;
3316 : :
3317 : : /*
3318 : : * For INSERT actions, the root relation's merge action is OK since the
3319 : : * INSERT's targetlist and the WHEN conditions can only refer to the
3320 : : * source relation and hence it does not matter which result relation we
3321 : : * work with.
3322 : : *
3323 : : * XXX does this mean that we can avoid creating copies of actionStates on
3324 : : * partitioned tables, for not-matched actions?
3325 : : */
15 3326 : 1318 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3327 : :
3328 : : /*
3329 : : * Make source tuple available to ExecQual and ExecProject. We don't need
3330 : : * the target tuple, since the WHEN quals and targetlist can't refer to
3331 : : * the target columns.
3332 : : */
748 alvherre@alvh.no-ip. 3333 :CBC 1318 : econtext->ecxt_scantuple = NULL;
3334 : 1318 : econtext->ecxt_innertuple = context->planSlot;
3335 : 1318 : econtext->ecxt_outertuple = NULL;
3336 : :
3337 [ + - + + : 1753 : foreach(l, actionStates)
+ + ]
3338 : : {
3339 : 1318 : MergeActionState *action = (MergeActionState *) lfirst(l);
3340 : 1318 : CmdType commandType = action->mas_action->commandType;
3341 : : TupleTableSlot *newslot;
3342 : :
3343 : : /*
3344 : : * Test condition, if any.
3345 : : *
3346 : : * In the absence of any condition, we perform the action
3347 : : * unconditionally (no need to check separately since ExecQual() will
3348 : : * return true if there are no conditions to evaluate).
3349 : : */
3350 [ + + ]: 1318 : if (!ExecQual(action->mas_whenqual, econtext))
3351 : 435 : continue;
3352 : :
3353 : : /* Perform stated action */
3354 [ + - - ]: 883 : switch (commandType)
3355 : : {
3356 : 883 : case CMD_INSERT:
3357 : :
3358 : : /*
3359 : : * Project the tuple. In case of a partitioned table, the
3360 : : * projection was already built to use the root's descriptor,
3361 : : * so we don't need to map the tuple here.
3362 : : */
3363 : 883 : newslot = ExecProject(action->mas_proj);
28 dean.a.rasheed@gmail 3364 :GNC 883 : mtstate->mt_merge_action = action;
3365 : :
3366 : 883 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3367 : : newslot, canSetTag, NULL, NULL);
748 alvherre@alvh.no-ip. 3368 :CBC 856 : mtstate->mt_merge_inserted += 1;
3369 : 856 : break;
748 alvherre@alvh.no-ip. 3370 :UBC 0 : case CMD_NOTHING:
3371 : : /* Do nothing */
3372 : 0 : break;
3373 : 0 : default:
3374 [ # # ]: 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3375 : : }
3376 : :
3377 : : /*
3378 : : * We've activated one of the WHEN clauses, so we don't search
3379 : : * further. This is required behaviour, not an optimization.
3380 : : */
748 alvherre@alvh.no-ip. 3381 :CBC 856 : break;
3382 : : }
3383 : :
28 dean.a.rasheed@gmail 3384 :GNC 1291 : return rslot;
748 alvherre@alvh.no-ip. 3385 :ECB (1066) : }
3386 : :
3387 : : /*
3388 : : * Initialize state for execution of MERGE.
3389 : : */
3390 : : void
748 alvherre@alvh.no-ip. 3391 :CBC 689 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3392 : : {
3393 : 689 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3394 : 689 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3395 : : ResultRelInfo *resultRelInfo;
3396 : : ExprContext *econtext;
3397 : : ListCell *lc;
3398 : : int i;
3399 : :
3400 [ - + ]: 689 : if (node->mergeActionLists == NIL)
748 alvherre@alvh.no-ip. 3401 :UBC 0 : return;
3402 : :
748 alvherre@alvh.no-ip. 3403 :CBC 689 : mtstate->mt_merge_subcommands = 0;
3404 : :
3405 [ + + ]: 689 : if (mtstate->ps.ps_ExprContext == NULL)
3406 : 614 : ExecAssignExprContext(estate, &mtstate->ps);
3407 : 689 : econtext = mtstate->ps.ps_ExprContext;
3408 : :
3409 : : /*
3410 : : * Create a MergeActionState for each action on the mergeActionList and
3411 : : * add it to either a list of matched actions or not-matched actions.
3412 : : *
3413 : : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3414 : : * anything here, do so there too.
3415 : : */
3416 : 689 : i = 0;
3417 [ + - + + : 1494 : foreach(lc, node->mergeActionLists)
+ + ]
3418 : : {
3419 : 805 : List *mergeActionList = lfirst(lc);
3420 : : Node *joinCondition;
3421 : : TupleDesc relationDesc;
3422 : : ListCell *l;
3423 : :
15 dean.a.rasheed@gmail 3424 :GNC 805 : joinCondition = (Node *) list_nth(node->mergeJoinConditions, i);
748 alvherre@alvh.no-ip. 3425 :CBC 805 : resultRelInfo = mtstate->resultRelInfo + i;
3426 : 805 : i++;
3427 : 805 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3428 : :
3429 : : /* initialize slots for MERGE fetches from this rel */
3430 [ + - ]: 805 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3431 : 805 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3432 : :
3433 : : /* initialize state for join condition checking */
15 dean.a.rasheed@gmail 3434 :GNC 805 : resultRelInfo->ri_MergeJoinCondition =
3435 : 805 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3436 : :
748 alvherre@alvh.no-ip. 3437 [ + - + + :CBC 2249 : foreach(l, mergeActionList)
+ + ]
3438 : : {
3439 : 1444 : MergeAction *action = (MergeAction *) lfirst(l);
3440 : : MergeActionState *action_state;
3441 : : TupleTableSlot *tgtslot;
3442 : : TupleDesc tgtdesc;
3443 : :
3444 : : /*
3445 : : * Build action merge state for this rel. (For partitions,
3446 : : * equivalent code exists in ExecInitPartitionInfo.)
3447 : : */
3448 : 1444 : action_state = makeNode(MergeActionState);
3449 : 1444 : action_state->mas_action = action;
3450 : 1444 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3451 : : &mtstate->ps);
3452 : :
3453 : : /*
3454 : : * We create three lists - one for each MergeMatchKind - and stick
3455 : : * the MergeActionState into the appropriate list.
3456 : : */
15 dean.a.rasheed@gmail 3457 :GNC 2888 : resultRelInfo->ri_MergeActions[action->matchKind] =
3458 : 1444 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3459 : : action_state);
3460 : :
748 alvherre@alvh.no-ip. 3461 [ + + + + :CBC 1444 : switch (action->commandType)
- ]
3462 : : {
3463 : 482 : case CMD_INSERT:
3464 : 482 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3465 : : action->targetList);
3466 : :
3467 : : /*
3468 : : * If the MERGE targets a partitioned table, any INSERT
3469 : : * actions must be routed through it, not the child
3470 : : * relations. Initialize the routing struct and the root
3471 : : * table's "new" tuple slot for that, if not already done.
3472 : : * The projection we prepare, for all relations, uses the
3473 : : * root relation descriptor, and targets the plan's root
3474 : : * slot. (This is consistent with the fact that we
3475 : : * checked the plan output to match the root relation,
3476 : : * above.)
3477 : : */
3478 [ + + ]: 482 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3479 : : RELKIND_PARTITIONED_TABLE)
3480 : : {
3481 [ + + ]: 149 : if (mtstate->mt_partition_tuple_routing == NULL)
3482 : : {
3483 : : /*
3484 : : * Initialize planstate for routing if not already
3485 : : * done.
3486 : : *
3487 : : * Note that the slot is managed as a standalone
3488 : : * slot belonging to ModifyTableState, so we pass
3489 : : * NULL for the 2nd argument.
3490 : : */
3491 : 62 : mtstate->mt_root_tuple_slot =
3492 : 62 : table_slot_create(rootRelInfo->ri_RelationDesc,
3493 : : NULL);
3494 : 62 : mtstate->mt_partition_tuple_routing =
3495 : 62 : ExecSetupPartitionTupleRouting(estate,
3496 : : rootRelInfo->ri_RelationDesc);
3497 : : }
3498 : 149 : tgtslot = mtstate->mt_root_tuple_slot;
3499 : 149 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3500 : : }
3501 : : else
3502 : : {
3503 : : /* not partitioned? use the stock relation and slot */
3504 : 333 : tgtslot = resultRelInfo->ri_newTupleSlot;
3505 : 333 : tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3506 : : }
3507 : :
3508 : 482 : action_state->mas_proj =
3509 : 482 : ExecBuildProjectionInfo(action->targetList, econtext,
3510 : : tgtslot,
3511 : : &mtstate->ps,
3512 : : tgtdesc);
3513 : :
3514 : 482 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3515 : 482 : break;
3516 : 740 : case CMD_UPDATE:
3517 : 740 : action_state->mas_proj =
3518 : 740 : ExecBuildUpdateProjection(action->targetList,
3519 : : true,
3520 : : action->updateColnos,
3521 : : relationDesc,
3522 : : econtext,
3523 : : resultRelInfo->ri_newTupleSlot,
3524 : : &mtstate->ps);
3525 : 740 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3526 : 740 : break;
3527 : 202 : case CMD_DELETE:
3528 : 202 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3529 : 202 : break;
3530 : 20 : case CMD_NOTHING:
3531 : 20 : break;
748 alvherre@alvh.no-ip. 3532 :UBC 0 : default:
3533 [ # # ]: 0 : elog(ERROR, "unknown operation");
3534 : : break;
3535 : : }
3536 : : }
3537 : : }
3538 : : }
3539 : :
3540 : : /*
3541 : : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3542 : : *
3543 : : * We mark 'projectNewInfoValid' even though the projections themselves
3544 : : * are not initialized here.
3545 : : */
3546 : : void
748 alvherre@alvh.no-ip. 3547 :CBC 814 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3548 : : ResultRelInfo *resultRelInfo)
3549 : : {
3550 : 814 : EState *estate = mtstate->ps.state;
3551 : :
3552 [ - + ]: 814 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3553 : :
3554 : 814 : resultRelInfo->ri_oldTupleSlot =
3555 : 814 : table_slot_create(resultRelInfo->ri_RelationDesc,
3556 : : &estate->es_tupleTable);
3557 : 814 : resultRelInfo->ri_newTupleSlot =
3558 : 814 : table_slot_create(resultRelInfo->ri_RelationDesc,
3559 : : &estate->es_tupleTable);
3560 : 814 : resultRelInfo->ri_projectNewInfoValid = true;
3561 : 814 : }
3562 : :
3563 : : /*
3564 : : * Process BEFORE EACH STATEMENT triggers
3565 : : */
3566 : : static void
5300 tgl@sss.pgh.pa.us 3567 : 57139 : fireBSTriggers(ModifyTableState *node)
3568 : : {
2218 alvherre@alvh.no-ip. 3569 : 57139 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
1273 heikki.linnakangas@i 3570 : 57139 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3571 : :
5300 tgl@sss.pgh.pa.us 3572 [ + + + + : 57139 : switch (node->operation)
- ]
3573 : : {
3574 : 43972 : case CMD_INSERT:
2540 rhaas@postgresql.org 3575 : 43972 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
2218 alvherre@alvh.no-ip. 3576 [ + + ]: 43966 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3264 andres@anarazel.de 3577 : 414 : ExecBSUpdateTriggers(node->ps.state,
3578 : : resultRelInfo);
5300 tgl@sss.pgh.pa.us 3579 : 43966 : break;
3580 : 6502 : case CMD_UPDATE:
2540 rhaas@postgresql.org 3581 : 6502 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
5300 tgl@sss.pgh.pa.us 3582 : 6502 : break;
3583 : 6030 : case CMD_DELETE:
2540 rhaas@postgresql.org 3584 : 6030 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
5300 tgl@sss.pgh.pa.us 3585 : 6030 : break;
748 alvherre@alvh.no-ip. 3586 : 635 : case CMD_MERGE:
3587 [ + + ]: 635 : if (node->mt_merge_subcommands & MERGE_INSERT)
3588 : 359 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3589 [ + + ]: 635 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3590 : 450 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3591 [ + + ]: 635 : if (node->mt_merge_subcommands & MERGE_DELETE)
3592 : 166 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3593 : 635 : break;
5300 tgl@sss.pgh.pa.us 3594 :UBC 0 : default:
3595 [ # # ]: 0 : elog(ERROR, "unknown operation");
3596 : : break;
3597 : : }
5300 tgl@sss.pgh.pa.us 3598 :CBC 57133 : }
3599 : :
3600 : : /*
3601 : : * Process AFTER EACH STATEMENT triggers
3602 : : */
3603 : : static void
2482 rhodiumtoad@postgres 3604 : 55622 : fireASTriggers(ModifyTableState *node)
3605 : : {
2218 alvherre@alvh.no-ip. 3606 : 55622 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
1273 heikki.linnakangas@i 3607 : 55622 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3608 : :
5300 tgl@sss.pgh.pa.us 3609 [ + + + + : 55622 : switch (node->operation)
- ]
3610 : : {
3611 : 42885 : case CMD_INSERT:
2218 alvherre@alvh.no-ip. 3612 [ + + ]: 42885 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3264 andres@anarazel.de 3613 : 363 : ExecASUpdateTriggers(node->ps.state,
3614 : : resultRelInfo,
2402 tgl@sss.pgh.pa.us 3615 : 363 : node->mt_oc_transition_capture);
2482 rhodiumtoad@postgres 3616 : 42885 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3617 : 42885 : node->mt_transition_capture);
5300 tgl@sss.pgh.pa.us 3618 : 42885 : break;
3619 : 6206 : case CMD_UPDATE:
2482 rhodiumtoad@postgres 3620 : 6206 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3621 : 6206 : node->mt_transition_capture);
5300 tgl@sss.pgh.pa.us 3622 : 6206 : break;
3623 : 5968 : case CMD_DELETE:
2482 rhodiumtoad@postgres 3624 : 5968 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3625 : 5968 : node->mt_transition_capture);
5300 tgl@sss.pgh.pa.us 3626 : 5968 : break;
748 alvherre@alvh.no-ip. 3627 : 563 : case CMD_MERGE:
3628 [ + + ]: 563 : if (node->mt_merge_subcommands & MERGE_DELETE)
3629 : 148 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3630 : 148 : node->mt_transition_capture);
3631 [ + + ]: 563 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3632 : 402 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3633 : 402 : node->mt_transition_capture);
3634 [ + + ]: 563 : if (node->mt_merge_subcommands & MERGE_INSERT)
3635 : 328 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3636 : 328 : node->mt_transition_capture);
3637 : 563 : break;
5300 tgl@sss.pgh.pa.us 3638 :UBC 0 : default:
3639 [ # # ]: 0 : elog(ERROR, "unknown operation");
3640 : : break;
3641 : : }
5300 tgl@sss.pgh.pa.us 3642 :CBC 55622 : }
3643 : :
3644 : : /*
3645 : : * Set up the state needed for collecting transition tuples for AFTER
3646 : : * triggers.
3647 : : */
3648 : : static void
2482 rhodiumtoad@postgres 3649 : 57288 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3650 : : {
2218 alvherre@alvh.no-ip. 3651 : 57288 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
1273 heikki.linnakangas@i 3652 : 57288 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3653 : :
3654 : : /* Check for transition tables on the directly targeted relation. */
2482 rhodiumtoad@postgres 3655 : 57288 : mtstate->mt_transition_capture =
2402 tgl@sss.pgh.pa.us 3656 : 57288 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3657 : 57288 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3658 : : mtstate->operation);
2218 alvherre@alvh.no-ip. 3659 [ + + ]: 57288 : if (plan->operation == CMD_INSERT &&
3660 [ + + ]: 43973 : plan->onConflictAction == ONCONFLICT_UPDATE)
2402 tgl@sss.pgh.pa.us 3661 : 414 : mtstate->mt_oc_transition_capture =
3662 : 414 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3663 : 414 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3664 : : CMD_UPDATE);
2277 rhaas@postgresql.org 3665 : 57288 : }
3666 : :
3667 : : /*
3668 : : * ExecPrepareTupleRouting --- prepare for routing one tuple
3669 : : *
3670 : : * Determine the partition in which the tuple in slot is to be inserted,
3671 : : * and return its ResultRelInfo in *partRelInfo. The return value is
3672 : : * a slot holding the tuple of the partition rowtype.
3673 : : *
3674 : : * This also sets the transition table information in mtstate based on the
3675 : : * selected partition.
3676 : : */
3677 : : static TupleTableSlot *
2218 alvherre@alvh.no-ip. 3678 : 361343 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
3679 : : EState *estate,
3680 : : PartitionTupleRouting *proute,
3681 : : ResultRelInfo *targetRelInfo,
3682 : : TupleTableSlot *slot,
3683 : : ResultRelInfo **partRelInfo)
3684 : : {
3685 : : ResultRelInfo *partrel;
3686 : : TupleConversionMap *map;
3687 : :
3688 : : /*
3689 : : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
3690 : : * not find a valid partition for the tuple in 'slot' then an error is
3691 : : * raised. An error may also be raised if the found partition is not a
3692 : : * valid target for INSERTs. This is required since a partitioned table
3693 : : * UPDATE to another partition becomes a DELETE+INSERT.
3694 : : */
1976 3695 : 361343 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3696 : :
3697 : : /*
3698 : : * If we're capturing transition tuples, we might need to convert from the
3699 : : * partition rowtype to root partitioned table's rowtype. But if there
3700 : : * are no BEFORE triggers on the partition that could change the tuple, we
3701 : : * can just remember the original unconverted tuple to avoid a needless
3702 : : * round trip conversion.
3703 : : */
2218 3704 [ + + ]: 361241 : if (mtstate->mt_transition_capture != NULL)
3705 : : {
3706 : : bool has_before_insert_row_trig;
3707 : :
1273 heikki.linnakangas@i 3708 [ + + ]: 84 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3709 [ + + ]: 21 : partrel->ri_TrigDesc->trig_insert_before_row);
3710 : :
3711 : 63 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
3712 [ + + ]: 63 : !has_before_insert_row_trig ? slot : NULL;
3713 : : }
3714 : :
3715 : : /*
3716 : : * Convert the tuple, if necessary.
3717 : : */
499 alvherre@alvh.no-ip. 3718 : 361241 : map = ExecGetRootToChildMap(partrel, estate);
2021 andres@anarazel.de 3719 [ + + ]: 361241 : if (map != NULL)
3720 : : {
1273 heikki.linnakangas@i 3721 : 34211 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3722 : :
2021 andres@anarazel.de 3723 : 34211 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3724 : : }
3725 : :
1278 heikki.linnakangas@i 3726 : 361241 : *partRelInfo = partrel;
2218 alvherre@alvh.no-ip. 3727 : 361241 : return slot;
3728 : : }
3729 : :
3730 : : /* ----------------------------------------------------------------
3731 : : * ExecModifyTable
3732 : : *
3733 : : * Perform table modifications as required, and return RETURNING results
3734 : : * if needed.
3735 : : * ----------------------------------------------------------------
3736 : : */
3737 : : static TupleTableSlot *
2463 andres@anarazel.de 3738 : 61234 : ExecModifyTable(PlanState *pstate)
3739 : : {
3740 : 61234 : ModifyTableState *node = castNode(ModifyTableState, pstate);
3741 : : ModifyTableContext context;
5161 bruce@momjian.us 3742 : 61234 : EState *estate = node->ps.state;
3743 : 61234 : CmdType operation = node->operation;
3744 : : ResultRelInfo *resultRelInfo;
3745 : : PlanState *subplanstate;
3746 : : TupleTableSlot *slot;
3747 : : TupleTableSlot *oldSlot;
3748 : : ItemPointerData tuple_ctid;
3749 : : HeapTupleData oldtupdata;
3750 : : HeapTuple oldtuple;
3751 : : ItemPointer tupleid;
3752 : :
2455 andres@anarazel.de 3753 [ - + ]: 61234 : CHECK_FOR_INTERRUPTS();
3754 : :
3755 : : /*
3756 : : * This should NOT get called during EvalPlanQual; we should have passed a
3757 : : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
3758 : : * Assert because this condition is easy to miss in testing. (Note:
3759 : : * although ModifyTable should not get executed within an EvalPlanQual
3760 : : * operation, we do have to allow it to be initialized and shut down in
3761 : : * case it is within a CTE subplan. Hence this test must be here, not in
3762 : : * ExecInitModifyTable.)
3763 : : */
1683 3764 [ - + ]: 61234 : if (estate->es_epq_active != NULL)
4460 tgl@sss.pgh.pa.us 3765 [ # # ]:UBC 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
3766 : :
3767 : : /*
3768 : : * If we've already completed processing, don't try to do more. We need
3769 : : * this test because ExecPostprocessPlan might call us an extra time, and
3770 : : * our subplan's nodes aren't necessarily robust against being called
3771 : : * extra times.
3772 : : */
4797 tgl@sss.pgh.pa.us 3773 [ + + ]:CBC 61234 : if (node->mt_done)
3774 : 391 : return NULL;
3775 : :
3776 : : /*
3777 : : * On first call, fire BEFORE STATEMENT triggers before proceeding.
3778 : : */
5300 3779 [ + + ]: 60843 : if (node->fireBSTriggers)
3780 : : {
3781 : 57139 : fireBSTriggers(node);
3782 : 57133 : node->fireBSTriggers = false;
3783 : : }
3784 : :
3785 : : /* Preload local variables */
1110 3786 : 60837 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
3787 : 60837 : subplanstate = outerPlanState(node);
3788 : :
3789 : : /* Set global context */
759 alvherre@alvh.no-ip. 3790 : 60837 : context.mtstate = node;
3791 : 60837 : context.epqstate = &node->mt_epqstate;
3792 : 60837 : context.estate = estate;
3793 : :
3794 : : /*
3795 : : * Fetch rows from subplan, and execute the required table modification
3796 : : * for each row.
3797 : : */
3798 : : for (;;)
3799 : : {
3800 : : /*
3801 : : * Reset the per-output-tuple exprcontext. This is needed because
3802 : : * triggers expect to use that context as workspace. It's a bit ugly
3803 : : * to do this below the top level of the plan, however. We might need
3804 : : * to rethink this later.
3805 : : */
4988 tgl@sss.pgh.pa.us 3806 [ + + ]: 6721622 : ResetPerTupleExprContext(estate);
3807 : :
3808 : : /*
3809 : : * Reset per-tuple memory context used for processing on conflict and
3810 : : * returning clauses, to free any expression evaluation storage
3811 : : * allocated in the previous cycle.
3812 : : */
1979 andres@anarazel.de 3813 [ + + ]: 6721622 : if (pstate->ps_ExprContext)
3814 : 166361 : ResetExprContext(pstate->ps_ExprContext);
3815 : :
3816 : : /*
3817 : : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
3818 : : * to execute, do so now --- see the comments in ExecMerge().
3819 : : */
15 dean.a.rasheed@gmail 3820 [ + + ]:GNC 6721622 : if (node->mt_merge_pending_not_matched != NULL)
3821 : : {
3822 : 1 : context.planSlot = node->mt_merge_pending_not_matched;
3823 : :
3824 : 1 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
3825 : 1 : node->canSetTag);
3826 : :
3827 : : /* Clear the pending action */
3828 : 1 : node->mt_merge_pending_not_matched = NULL;
3829 : :
3830 : : /*
3831 : : * If we got a RETURNING result, return it to the caller. We'll
3832 : : * continue the work on next call.
3833 : : */
3834 [ + - ]: 1 : if (slot)
3835 : 1 : return slot;
3836 : :
15 dean.a.rasheed@gmail 3837 :UNC 0 : continue; /* continue with the next tuple */
3838 : : }
3839 : :
3840 : : /* Fetch the next row from subplan */
725 alvherre@alvh.no-ip. 3841 :CBC 6721621 : context.planSlot = ExecProcNode(subplanstate);
3842 : :
3843 : : /* No more tuples to process? */
3844 [ + + + + ]: 6721421 : if (TupIsNull(context.planSlot))
3845 : : break;
3846 : :
3847 : : /*
3848 : : * When there are multiple result relations, each tuple contains a
3849 : : * junk column that gives the OID of the rel from which it came.
3850 : : * Extract it and select the correct result relation.
3851 : : */
1110 tgl@sss.pgh.pa.us 3852 [ + + ]: 6665799 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
3853 : : {
3854 : : Datum datum;
3855 : : bool isNull;
3856 : : Oid resultoid;
3857 : :
725 alvherre@alvh.no-ip. 3858 : 2423 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
3859 : : &isNull);
1110 tgl@sss.pgh.pa.us 3860 [ + + ]: 2423 : if (isNull)
3861 : : {
3862 : : /*
3863 : : * For commands other than MERGE, any tuples having InvalidOid
3864 : : * for tableoid are errors. For MERGE, we may need to handle
3865 : : * them as WHEN NOT MATCHED clauses if any, so do that.
3866 : : *
3867 : : * Note that we use the node's toplevel resultRelInfo, not any
3868 : : * specific partition's.
3869 : : */
748 alvherre@alvh.no-ip. 3870 [ + - ]: 233 : if (operation == CMD_MERGE)
3871 : : {
725 3872 : 233 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3873 : :
28 dean.a.rasheed@gmail 3874 :GNC 233 : slot = ExecMerge(&context, node->resultRelInfo,
3875 : 233 : NULL, NULL, node->canSetTag);
3876 : :
3877 : : /*
3878 : : * If we got a RETURNING result, return it to the caller.
3879 : : * We'll continue the work on next call.
3880 : : */
3881 [ + + ]: 230 : if (slot)
3882 : 10 : return slot;
3883 : :
3884 : 220 : continue; /* continue with the next tuple */
3885 : : }
3886 : :
1110 tgl@sss.pgh.pa.us 3887 [ # # ]:UBC 0 : elog(ERROR, "tableoid is NULL");
3888 : : }
1110 tgl@sss.pgh.pa.us 3889 :CBC 2190 : resultoid = DatumGetObjectId(datum);
3890 : :
3891 : : /* If it's not the same as last time, we need to locate the rel */
3892 [ + + ]: 2190 : if (resultoid != node->mt_lastResultOid)
1104 3893 : 1483 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
3894 : : false, true);
3895 : : }
3896 : :
3897 : : /*
3898 : : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
3899 : : * here is compute the RETURNING expressions.
3900 : : */
2949 rhaas@postgresql.org 3901 [ + + ]: 6665566 : if (resultRelInfo->ri_usesFdwDirectModify)
3902 : : {
3903 [ - + ]: 347 : Assert(resultRelInfo->ri_projectReturning);
3904 : :
3905 : : /*
3906 : : * A scan slot containing the data that was actually inserted,
3907 : : * updated or deleted has already been made available to
3908 : : * ExecProcessReturning by IterateDirectModify, so no need to
3909 : : * provide it here.
3910 : : */
725 alvherre@alvh.no-ip. 3911 : 347 : slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot);
3912 : :
2949 rhaas@postgresql.org 3913 : 347 : return slot;
3914 : : }
3915 : :
725 alvherre@alvh.no-ip. 3916 : 6665219 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3917 : 6665219 : slot = context.planSlot;
3918 : :
2330 tgl@sss.pgh.pa.us 3919 : 6665219 : tupleid = NULL;
3675 noah@leadboat.com 3920 : 6665219 : oldtuple = NULL;
3921 : :
3922 : : /*
3923 : : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
3924 : : * to be updated/deleted/merged. For a heap relation, that's a TID;
3925 : : * otherwise we may have a wholerow junk attr that carries the old
3926 : : * tuple in toto. Keep this in step with the part of
3927 : : * ExecInitModifyTable that sets up ri_RowIdAttNo.
3928 : : */
748 alvherre@alvh.no-ip. 3929 [ + + + + : 6665219 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
+ + ]
3930 : : operation == CMD_MERGE)
3931 : : {
3932 : : char relkind;
3933 : : Datum datum;
3934 : : bool isNull;
3935 : :
1110 tgl@sss.pgh.pa.us 3936 : 976553 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
3937 [ + + + + ]: 976553 : if (relkind == RELKIND_RELATION ||
3938 [ + + ]: 243 : relkind == RELKIND_MATVIEW ||
3939 : : relkind == RELKIND_PARTITIONED_TABLE)
3940 : : {
3941 : : /* ri_RowIdAttNo refers to a ctid attribute */
3942 [ - + ]: 976313 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
3943 : 976313 : datum = ExecGetJunkAttribute(slot,
3944 : 976313 : resultRelInfo->ri_RowIdAttNo,
3945 : : &isNull);
3946 : :
3947 : : /*
3948 : : * For commands other than MERGE, any tuples having a null row
3949 : : * identifier are errors. For MERGE, we may need to handle
3950 : : * them as WHEN NOT MATCHED clauses if any, so do that.
3951 : : *
3952 : : * Note that we use the node's toplevel resultRelInfo, not any
3953 : : * specific partition's.
3954 : : */
3955 [ + + ]: 976313 : if (isNull)
3956 : : {
748 alvherre@alvh.no-ip. 3957 [ + - ]: 1053 : if (operation == CMD_MERGE)
3958 : : {
725 3959 : 1053 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3960 : :
28 dean.a.rasheed@gmail 3961 :GNC 1053 : slot = ExecMerge(&context, node->resultRelInfo,
3962 : 1053 : NULL, NULL, node->canSetTag);
3963 : :
3964 : : /*
3965 : : * If we got a RETURNING result, return it to the
3966 : : * caller. We'll continue the work on next call.
3967 : : */
3968 [ + + ]: 1032 : if (slot)
3969 : 52 : return slot;
3970 : :
3971 : 1001 : continue; /* continue with the next tuple */
3972 : : }
3973 : :
1110 tgl@sss.pgh.pa.us 3974 [ # # ]:UBC 0 : elog(ERROR, "ctid is NULL");
3975 : : }
3976 : :
1110 tgl@sss.pgh.pa.us 3977 :CBC 975260 : tupleid = (ItemPointer) DatumGetPointer(datum);
3978 : 975260 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
3979 : 975260 : tupleid = &tuple_ctid;
3980 : : }
3981 : :
3982 : : /*
3983 : : * Use the wholerow attribute, when available, to reconstruct the
3984 : : * old relation tuple. The old tuple serves one or both of two
3985 : : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
3986 : : * provides values for any unchanged columns for the NEW tuple of
3987 : : * an UPDATE, because the subplan does not produce all the columns
3988 : : * of the target table.
3989 : : *
3990 : : * Note that the wholerow attribute does not carry system columns,
3991 : : * so foreign table triggers miss seeing those, except that we
3992 : : * know enough here to set t_tableOid. Quite separately from
3993 : : * this, the FDW may fetch its own junk attrs to identify the row.
3994 : : *
3995 : : * Other relevant relkinds, currently limited to views, always
3996 : : * have a wholerow attribute.
3997 : : */
3998 [ + + ]: 240 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
3999 : : {
4000 : 231 : datum = ExecGetJunkAttribute(slot,
4001 : 231 : resultRelInfo->ri_RowIdAttNo,
4002 : : &isNull);
4003 : :
4004 : : /*
4005 : : * For commands other than MERGE, any tuples having a null row
4006 : : * identifier are errors. For MERGE, we may need to handle
4007 : : * them as WHEN NOT MATCHED clauses if any, so do that.
4008 : : *
4009 : : * Note that we use the node's toplevel resultRelInfo, not any
4010 : : * specific partition's.
4011 : : */
4012 [ + + ]: 231 : if (isNull)
4013 : : {
45 dean.a.rasheed@gmail 4014 [ + - ]:GNC 24 : if (operation == CMD_MERGE)
4015 : : {
4016 : 24 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4017 : :
28 4018 : 24 : slot = ExecMerge(&context, node->resultRelInfo,
4019 : 24 : NULL, NULL, node->canSetTag);
4020 : :
4021 : : /*
4022 : : * If we got a RETURNING result, return it to the
4023 : : * caller. We'll continue the work on next call.
4024 : : */
4025 [ + + ]: 21 : if (slot)
4026 : 6 : return slot;
4027 : :
4028 : 15 : continue; /* continue with the next tuple */
4029 : : }
4030 : :
1110 tgl@sss.pgh.pa.us 4031 [ # # ]:UBC 0 : elog(ERROR, "wholerow is NULL");
4032 : : }
4033 : :
1110 tgl@sss.pgh.pa.us 4034 :CBC 207 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4035 : 207 : oldtupdata.t_len =
4036 : 207 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4037 : 207 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4038 : : /* Historically, view triggers see invalid t_tableOid. */
4039 : 207 : oldtupdata.t_tableOid =
4040 [ + + ]: 207 : (relkind == RELKIND_VIEW) ? InvalidOid :
4041 : 81 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4042 : :
4043 : 207 : oldtuple = &oldtupdata;
4044 : : }
4045 : : else
4046 : : {
4047 : : /* Only foreign tables are allowed to omit a row-ID attr */
4048 [ - + ]: 9 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4049 : : }
4050 : : }
4051 : :
5300 4052 [ + + + + : 6664142 : switch (operation)
- ]
4053 : : {
4054 : 5688666 : case CMD_INSERT:
4055 : : /* Initialize projection info if first time for this table */
1104 4056 [ + + ]: 5688666 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4057 : 43436 : ExecInitInsertProjection(node, resultRelInfo);
725 alvherre@alvh.no-ip. 4058 : 5688666 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
759 4059 : 5688666 : slot = ExecInsert(&context, resultRelInfo, slot,
756 4060 : 5688666 : node->canSetTag, NULL, NULL);
5300 tgl@sss.pgh.pa.us 4061 : 5687675 : break;
4062 : :
4063 : 153376 : case CMD_UPDATE:
4064 : : /* Initialize projection info if first time for this table */
1104 4065 [ + + ]: 153376 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4066 : 6368 : ExecInitUpdateProjection(node, resultRelInfo);
4067 : :
4068 : : /*
4069 : : * Make the new tuple by combining plan's output tuple with
4070 : : * the old tuple being updated.
4071 : : */
1110 4072 : 153376 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4073 [ + + ]: 153376 : if (oldtuple != NULL)
4074 : : {
4075 : : /* Use the wholerow junk attr as the old tuple. */
4076 : 129 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4077 : : }
4078 : : else
4079 : : {
4080 : : /* Fetch the most recent version of old tuple. */
4081 : 153247 : Relation relation = resultRelInfo->ri_RelationDesc;
4082 : :
4083 [ - + ]: 153247 : if (!table_tuple_fetch_row_version(relation, tupleid,
4084 : : SnapshotAny,
4085 : : oldSlot))
1110 tgl@sss.pgh.pa.us 4086 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple being updated");
4087 : : }
398 dean.a.rasheed@gmail 4088 :CBC 153376 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4089 : : oldSlot);
4090 : :
4091 : : /* Now apply the update. */
759 alvherre@alvh.no-ip. 4092 : 153376 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
3 akorotkov@postgresql 4093 : 153376 : slot, node->canSetTag);
5300 tgl@sss.pgh.pa.us 4094 : 153175 : break;
4095 : :
4096 : 819904 : case CMD_DELETE:
759 alvherre@alvh.no-ip. 4097 : 819904 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
3 akorotkov@postgresql 4098 : 819904 : true, false, node->canSetTag, NULL, NULL, NULL);
5300 tgl@sss.pgh.pa.us 4099 : 819863 : break;
4100 : :
748 alvherre@alvh.no-ip. 4101 : 2196 : case CMD_MERGE:
45 dean.a.rasheed@gmail 4102 :GNC 2196 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4103 : 2196 : node->canSetTag);
748 alvherre@alvh.no-ip. 4104 :CBC 2151 : break;
4105 : :
5300 tgl@sss.pgh.pa.us 4106 :UBC 0 : default:
4107 [ # # ]: 0 : elog(ERROR, "unknown operation");
4108 : : break;
4109 : : }
4110 : :
4111 : : /*
4112 : : * If we got a RETURNING result, return it to caller. We'll continue
4113 : : * the work on next call.
4114 : : */
5300 tgl@sss.pgh.pa.us 4115 [ + + ]:CBC 6662864 : if (slot)
4116 : 3300 : return slot;
4117 : : }
4118 : :
4119 : : /*
4120 : : * Insert remaining tuples for batch insert.
4121 : : */
506 efujita@postgresql.o 4122 [ + + ]: 55622 : if (estate->es_insert_pending_result_relations != NIL)
4123 : 12 : ExecPendingInserts(estate);
4124 : :
4125 : : /*
4126 : : * We're done, but fire AFTER STATEMENT triggers before exiting.
4127 : : */
5300 tgl@sss.pgh.pa.us 4128 : 55622 : fireASTriggers(node);
4129 : :
4797 4130 : 55622 : node->mt_done = true;
4131 : :
5300 4132 : 55622 : return NULL;
4133 : : }
4134 : :
4135 : : /*
4136 : : * ExecLookupResultRelByOid
4137 : : * If the table with given OID is among the result relations to be
4138 : : * updated by the given ModifyTable node, return its ResultRelInfo.
4139 : : *
4140 : : * If not found, return NULL if missing_ok, else raise error.
4141 : : *
4142 : : * If update_cache is true, then upon successful lookup, update the node's
4143 : : * one-element cache. ONLY ExecModifyTable may pass true for this.
4144 : : */
4145 : : ResultRelInfo *
1104 4146 : 6219 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4147 : : bool missing_ok, bool update_cache)
4148 : : {
4149 [ + + ]: 6219 : if (node->mt_resultOidHash)
4150 : : {
4151 : : /* Use the pre-built hash table to locate the rel */
4152 : : MTTargetRelLookup *mtlookup;
4153 : :
4154 : : mtlookup = (MTTargetRelLookup *)
4155 : 547 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4156 [ + - ]: 547 : if (mtlookup)
4157 : : {
4158 [ + + ]: 547 : if (update_cache)
4159 : : {
4160 : 397 : node->mt_lastResultOid = resultoid;
4161 : 397 : node->mt_lastResultIndex = mtlookup->relationIndex;
4162 : : }
4163 : 547 : return node->resultRelInfo + mtlookup->relationIndex;
4164 : : }
4165 : : }
4166 : : else
4167 : : {
4168 : : /* With few target rels, just search the ResultRelInfo array */
4169 [ + + ]: 10533 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4170 : : {
4171 : 6038 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4172 : :
4173 [ + + ]: 6038 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4174 : : {
4175 [ + + ]: 1177 : if (update_cache)
4176 : : {
4177 : 1086 : node->mt_lastResultOid = resultoid;
4178 : 1086 : node->mt_lastResultIndex = ndx;
4179 : : }
4180 : 1177 : return rInfo;
4181 : : }
4182 : : }
4183 : : }
4184 : :
4185 [ - + ]: 4495 : if (!missing_ok)
1104 tgl@sss.pgh.pa.us 4186 [ # # ]:UBC 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
1104 tgl@sss.pgh.pa.us 4187 :CBC 4495 : return NULL;
4188 : : }
4189 : :
4190 : : /* ----------------------------------------------------------------
4191 : : * ExecInitModifyTable
4192 : : * ----------------------------------------------------------------
4193 : : */
4194 : : ModifyTableState *
5300 4195 : 57703 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4196 : : {
4197 : : ModifyTableState *mtstate;
1110 4198 : 57703 : Plan *subplan = outerPlan(node);
5300 4199 : 57703 : CmdType operation = node->operation;
1110 4200 : 57703 : int nrels = list_length(node->resultRelations);
4201 : : ResultRelInfo *resultRelInfo;
4202 : : List *arowmarks;
4203 : : ListCell *l;
4204 : : int i;
4205 : : Relation rel;
4206 : :
4207 : : /* check for unsupported flags */
5300 4208 [ - + ]: 57703 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4209 : :
4210 : : /*
4211 : : * create state structure
4212 : : */
4213 : 57703 : mtstate = makeNode(ModifyTableState);
4214 : 57703 : mtstate->ps.plan = (Plan *) node;
4215 : 57703 : mtstate->ps.state = estate;
2463 andres@anarazel.de 4216 : 57703 : mtstate->ps.ExecProcNode = ExecModifyTable;
4217 : :
4797 tgl@sss.pgh.pa.us 4218 : 57703 : mtstate->operation = operation;
4219 : 57703 : mtstate->canSetTag = node->canSetTag;
4220 : 57703 : mtstate->mt_done = false;
4221 : :
1110 4222 : 57703 : mtstate->mt_nrels = nrels;
1279 heikki.linnakangas@i 4223 : 57703 : mtstate->resultRelInfo = (ResultRelInfo *)
1110 tgl@sss.pgh.pa.us 4224 : 57703 : palloc(nrels * sizeof(ResultRelInfo));
4225 : :
15 dean.a.rasheed@gmail 4226 :GNC 57703 : mtstate->mt_merge_pending_not_matched = NULL;
748 alvherre@alvh.no-ip. 4227 :CBC 57703 : mtstate->mt_merge_inserted = 0;
4228 : 57703 : mtstate->mt_merge_updated = 0;
4229 : 57703 : mtstate->mt_merge_deleted = 0;
4230 : :
4231 : : /*----------
4232 : : * Resolve the target relation. This is the same as:
4233 : : *
4234 : : * - the relation for which we will fire FOR STATEMENT triggers,
4235 : : * - the relation into whose tuple format all captured transition tuples
4236 : : * must be converted, and
4237 : : * - the root partitioned table used for tuple routing.
4238 : : *
4239 : : * If it's a partitioned or inherited table, the root partition or
4240 : : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4241 : : * given explicitly in node->rootRelation. Otherwise, the target relation
4242 : : * is the sole relation in the node->resultRelations list.
4243 : : *----------
4244 : : */
1279 heikki.linnakangas@i 4245 [ + + ]: 57703 : if (node->rootRelation > 0)
4246 : : {
4247 : 1260 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4248 : 1260 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4249 : : node->rootRelation);
4250 : : }
4251 : : else
4252 : : {
173 tgl@sss.pgh.pa.us 4253 [ - + ]: 56443 : Assert(list_length(node->resultRelations) == 1);
1273 heikki.linnakangas@i 4254 : 56443 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4255 : 56443 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4256 : 56443 : linitial_int(node->resultRelations));
4257 : : }
4258 : :
4259 : : /* set up epqstate with dummy subplan data for the moment */
331 tgl@sss.pgh.pa.us 4260 : 57703 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4261 : : node->epqParam, node->resultRelations);
5300 4262 : 57703 : mtstate->fireBSTriggers = true;
4263 : :
4264 : : /*
4265 : : * Build state for collecting transition tuples. This requires having a
4266 : : * valid trigger query context, so skip it in explain-only mode.
4267 : : */
1273 heikki.linnakangas@i 4268 [ + + ]: 57703 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4269 : 57288 : ExecSetupTransitionCaptureState(mtstate, estate);
4270 : :
4271 : : /*
4272 : : * Open all the result relations and initialize the ResultRelInfo structs.
4273 : : * (But root relation was initialized above, if it's part of the array.)
4274 : : * We must do this before initializing the subplan, because direct-modify
4275 : : * FDWs expect their ResultRelInfos to be available.
4276 : : */
4797 tgl@sss.pgh.pa.us 4277 : 57703 : resultRelInfo = mtstate->resultRelInfo;
5300 4278 : 57703 : i = 0;
1110 4279 [ + - + + : 116426 : foreach(l, node->resultRelations)
+ + ]
4280 : : {
1279 heikki.linnakangas@i 4281 : 58860 : Index resultRelation = lfirst_int(l);
45 dean.a.rasheed@gmail 4282 :GNC 58860 : List *mergeActions = NIL;
4283 : :
4284 [ + + ]: 58860 : if (node->mergeActionLists)
4285 : 805 : mergeActions = list_nth(node->mergeActionLists, i);
4286 : :
1273 heikki.linnakangas@i 4287 [ + + ]:CBC 58860 : if (resultRelInfo != mtstate->rootResultRelInfo)
4288 : : {
4289 : 2417 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4290 : :
4291 : : /*
4292 : : * For child result relations, store the root result relation
4293 : : * pointer. We do so for the convenience of places that want to
4294 : : * look at the query's original target relation but don't have the
4295 : : * mtstate handy.
4296 : : */
1104 tgl@sss.pgh.pa.us 4297 : 2417 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4298 : : }
4299 : :
4300 : : /* Initialize the usesFdwDirectModify flag */
759 alvherre@alvh.no-ip. 4301 : 58860 : resultRelInfo->ri_usesFdwDirectModify =
4302 : 58860 : bms_is_member(i, node->fdwDirectModifyPlans);
4303 : :
4304 : : /*
4305 : : * Verify result relation is a valid target for the current operation
4306 : : */
45 dean.a.rasheed@gmail 4307 :GNC 58860 : CheckValidResultRel(resultRelInfo, operation, mergeActions);
4308 : :
1110 tgl@sss.pgh.pa.us 4309 :CBC 58723 : resultRelInfo++;
4310 : 58723 : i++;
4311 : : }
4312 : :
4313 : : /*
4314 : : * Now we may initialize the subplan.
4315 : : */
4316 : 57566 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4317 : :
4318 : : /*
4319 : : * Do additional per-result-relation initialization.
4320 : : */
4321 [ + + ]: 116272 : for (i = 0; i < nrels; i++)
4322 : : {
4323 : 58706 : resultRelInfo = &mtstate->resultRelInfo[i];
4324 : :
4325 : : /* Let FDWs init themselves for foreign-table result rels */
2949 rhaas@postgresql.org 4326 [ + + ]: 58706 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4327 [ + + ]: 58602 : resultRelInfo->ri_FdwRoutine != NULL &&
4053 tgl@sss.pgh.pa.us 4328 [ + - ]: 153 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4329 : : {
4330 : 153 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4331 : :
4332 : 153 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4333 : : resultRelInfo,
4334 : : fdw_private,
4335 : : i,
4336 : : eflags);
4337 : : }
4338 : :
4339 : : /*
4340 : : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4341 : : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4342 : : * tables, the FDW might have created additional junk attr(s), but
4343 : : * those are no concern of ours.
4344 : : */
748 alvherre@alvh.no-ip. 4345 [ + + + + : 58706 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
+ + ]
4346 : : operation == CMD_MERGE)
4347 : : {
4348 : : char relkind;
4349 : :
1104 tgl@sss.pgh.pa.us 4350 : 14621 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4351 [ + + + + ]: 14621 : if (relkind == RELKIND_RELATION ||
4352 [ + + ]: 317 : relkind == RELKIND_MATVIEW ||
4353 : : relkind == RELKIND_PARTITIONED_TABLE)
4354 : : {
4355 : 14322 : resultRelInfo->ri_RowIdAttNo =
4356 : 14322 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4357 [ - + ]: 14322 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1104 tgl@sss.pgh.pa.us 4358 [ # # ]:UBC 0 : elog(ERROR, "could not find junk ctid column");
4359 : : }
1104 tgl@sss.pgh.pa.us 4360 [ + + ]:CBC 299 : else if (relkind == RELKIND_FOREIGN_TABLE)
4361 : : {
4362 : : /*
4363 : : * We don't support MERGE with foreign tables for now. (It's
4364 : : * problematic because the implementation uses CTID.)
4365 : : */
748 alvherre@alvh.no-ip. 4366 [ - + ]: 170 : Assert(operation != CMD_MERGE);
4367 : :
4368 : : /*
4369 : : * When there is a row-level trigger, there should be a
4370 : : * wholerow attribute. We also require it to be present in
4371 : : * UPDATE and MERGE, so we can get the values of unchanged
4372 : : * columns.
4373 : : */
1104 tgl@sss.pgh.pa.us 4374 : 170 : resultRelInfo->ri_RowIdAttNo =
4375 : 170 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4376 : : "wholerow");
748 alvherre@alvh.no-ip. 4377 [ + + - + ]: 170 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
1104 tgl@sss.pgh.pa.us 4378 [ - + ]: 95 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1104 tgl@sss.pgh.pa.us 4379 [ # # ]:UBC 0 : elog(ERROR, "could not find junk wholerow column");
4380 : : }
4381 : : else
4382 : : {
4383 : : /* Other valid target relkinds must provide wholerow */
1104 tgl@sss.pgh.pa.us 4384 :CBC 129 : resultRelInfo->ri_RowIdAttNo =
4385 : 129 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4386 : : "wholerow");
4387 [ - + ]: 129 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
1104 tgl@sss.pgh.pa.us 4388 [ # # ]:UBC 0 : elog(ERROR, "could not find junk wholerow column");
4389 : : }
4390 : : }
4391 : : }
4392 : :
4393 : : /*
4394 : : * If this is an inherited update/delete/merge, there will be a junk
4395 : : * attribute named "tableoid" present in the subplan's targetlist. It
4396 : : * will be used to identify the result relation for a given tuple to be
4397 : : * updated/deleted/merged.
4398 : : */
1104 tgl@sss.pgh.pa.us 4399 :CBC 57566 : mtstate->mt_resultOidAttno =
4400 : 57566 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4401 [ + + - + ]: 57566 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
4402 : 57566 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4403 : 57566 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4404 : :
4405 : : /* Get the root target relation */
1273 heikki.linnakangas@i 4406 : 57566 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4407 : :
4408 : : /*
4409 : : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4410 : : * or MERGE might need this too, but only if it actually moves tuples
4411 : : * between partitions; in that case setup is done by
4412 : : * ExecCrossPartitionUpdate.
4413 : : */
2277 rhaas@postgresql.org 4414 [ + + + + ]: 57566 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4415 : : operation == CMD_INSERT)
2243 4416 : 3373 : mtstate->mt_partition_tuple_routing =
1104 tgl@sss.pgh.pa.us 4417 : 3373 : ExecSetupPartitionTupleRouting(estate, rel);
4418 : :
4419 : : /*
4420 : : * Initialize any WITH CHECK OPTION constraints if needed.
4421 : : */
3923 sfrost@snowman.net 4422 : 57566 : resultRelInfo = mtstate->resultRelInfo;
4423 [ + + + + : 58227 : foreach(l, node->withCheckOptionLists)
+ + ]
4424 : : {
4425 : 661 : List *wcoList = (List *) lfirst(l);
4426 : 661 : List *wcoExprs = NIL;
4427 : : ListCell *ll;
4428 : :
4429 [ + - + + : 1804 : foreach(ll, wcoList)
+ + ]
4430 : : {
4431 : 1143 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
2588 andres@anarazel.de 4432 : 1143 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4433 : : &mtstate->ps);
4434 : :
3923 sfrost@snowman.net 4435 : 1143 : wcoExprs = lappend(wcoExprs, wcoExpr);
4436 : : }
4437 : :
4438 : 661 : resultRelInfo->ri_WithCheckOptions = wcoList;
4439 : 661 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4440 : 661 : resultRelInfo++;
4441 : : }
4442 : :
4443 : : /*
4444 : : * Initialize RETURNING projections if needed.
4445 : : */
5300 tgl@sss.pgh.pa.us 4446 [ + + ]: 57566 : if (node->returningLists)
4447 : : {
4448 : : TupleTableSlot *slot;
4449 : : ExprContext *econtext;
4450 : :
4451 : : /*
4452 : : * Initialize result tuple slot and assign its rowtype using the first
4453 : : * RETURNING list. We assume the rest will look the same.
4454 : : */
2298 andres@anarazel.de 4455 : 2097 : mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
4456 : :
4457 : : /* Set up a slot for the output of the RETURNING projection(s) */
1977 4458 : 2097 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
5300 tgl@sss.pgh.pa.us 4459 : 2097 : slot = mtstate->ps.ps_ResultTupleSlot;
4460 : :
4461 : : /* Need an econtext too */
2588 andres@anarazel.de 4462 [ + - ]: 2097 : if (mtstate->ps.ps_ExprContext == NULL)
4463 : 2097 : ExecAssignExprContext(estate, &mtstate->ps);
4464 : 2097 : econtext = mtstate->ps.ps_ExprContext;
4465 : :
4466 : : /*
4467 : : * Build a projection for each result rel.
4468 : : */
4797 tgl@sss.pgh.pa.us 4469 : 2097 : resultRelInfo = mtstate->resultRelInfo;
5300 4470 [ + - + + : 4358 : foreach(l, node->returningLists)
+ + ]
4471 : : {
4472 : 2261 : List *rlist = (List *) lfirst(l);
4473 : :
2200 rhaas@postgresql.org 4474 : 2261 : resultRelInfo->ri_returningList = rlist;
5300 tgl@sss.pgh.pa.us 4475 : 2261 : resultRelInfo->ri_projectReturning =
2588 andres@anarazel.de 4476 : 2261 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
2489 tgl@sss.pgh.pa.us 4477 : 2261 : resultRelInfo->ri_RelationDesc->rd_att);
5300 4478 : 2261 : resultRelInfo++;
4479 : : }
4480 : : }
4481 : : else
4482 : : {
4483 : : /*
4484 : : * We still must construct a dummy result tuple type, because InitPlan
4485 : : * expects one (maybe should change that?).
4486 : : */
2298 andres@anarazel.de 4487 : 55469 : mtstate->ps.plan->targetlist = NIL;
1983 4488 : 55469 : ExecInitResultTypeTL(&mtstate->ps);
4489 : :
5300 tgl@sss.pgh.pa.us 4490 : 55469 : mtstate->ps.ps_ExprContext = NULL;
4491 : : }
4492 : :
4493 : : /* Set the list of arbiter indexes if needed for ON CONFLICT */
2211 alvherre@alvh.no-ip. 4494 : 57566 : resultRelInfo = mtstate->resultRelInfo;
4495 [ + + ]: 57566 : if (node->onConflictAction != ONCONFLICT_NONE)
4496 : : {
4497 : : /* insert may only have one relation, inheritance is not expanded */
1104 tgl@sss.pgh.pa.us 4498 [ - + ]: 594 : Assert(nrels == 1);
2211 alvherre@alvh.no-ip. 4499 : 594 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4500 : : }
4501 : :
4502 : : /*
4503 : : * If needed, Initialize target list, projection and qual for ON CONFLICT
4504 : : * DO UPDATE.
4505 : : */
3264 andres@anarazel.de 4506 [ + + ]: 57566 : if (node->onConflictAction == ONCONFLICT_UPDATE)
4507 : : {
1070 tgl@sss.pgh.pa.us 4508 : 450 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4509 : : ExprContext *econtext;
4510 : : TupleDesc relationDesc;
4511 : :
4512 : : /* already exists if created by RETURNING processing above */
3264 andres@anarazel.de 4513 [ + + ]: 450 : if (mtstate->ps.ps_ExprContext == NULL)
4514 : 316 : ExecAssignExprContext(estate, &mtstate->ps);
4515 : :
4516 : 450 : econtext = mtstate->ps.ps_ExprContext;
2249 4517 : 450 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4518 : :
4519 : : /* create state for DO UPDATE SET operation */
1070 tgl@sss.pgh.pa.us 4520 : 450 : resultRelInfo->ri_onConflict = onconfl;
4521 : :
4522 : : /* initialize slot for the existing tuple */
4523 : 450 : onconfl->oc_Existing =
1861 andres@anarazel.de 4524 : 450 : table_slot_create(resultRelInfo->ri_RelationDesc,
4525 : 450 : &mtstate->ps.state->es_tupleTable);
4526 : :
4527 : : /*
4528 : : * Create the tuple slot for the UPDATE SET projection. We want a slot
4529 : : * of the table's type here, because the slot will be used to insert
4530 : : * into the table, and for RETURNING processing - which may access
4531 : : * system attributes.
4532 : : */
1070 tgl@sss.pgh.pa.us 4533 : 450 : onconfl->oc_ProjSlot =
4534 : 450 : table_slot_create(resultRelInfo->ri_RelationDesc,
4535 : 450 : &mtstate->ps.state->es_tupleTable);
4536 : :
4537 : : /* build UPDATE SET projection state */
4538 : 450 : onconfl->oc_ProjInfo =
4539 : 450 : ExecBuildUpdateProjection(node->onConflictSet,
4540 : : true,
4541 : : node->onConflictCols,
4542 : : relationDesc,
4543 : : econtext,
4544 : : onconfl->oc_ProjSlot,
4545 : : &mtstate->ps);
4546 : :
4547 : : /* initialize state to evaluate the WHERE clause, if any */
3264 andres@anarazel.de 4548 [ + + ]: 450 : if (node->onConflictWhere)
4549 : : {
4550 : : ExprState *qualexpr;
4551 : :
2588 4552 : 88 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
4553 : : &mtstate->ps);
1070 tgl@sss.pgh.pa.us 4554 : 88 : onconfl->oc_WhereClause = qualexpr;
4555 : : }
4556 : : }
4557 : :
4558 : : /*
4559 : : * If we have any secondary relations in an UPDATE or DELETE, they need to
4560 : : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4561 : : * EvalPlanQual mechanism needs to be told about them. This also goes for
4562 : : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4563 : : */
1110 4564 : 57566 : arowmarks = NIL;
5284 4565 [ + + + + : 58818 : foreach(l, node->rowMarks)
+ + ]
4566 : : {
2561 4567 : 1252 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4568 : : ExecRowMark *erm;
4569 : : ExecAuxRowMark *aerm;
4570 : :
4571 : : /* ignore "parent" rowmarks; they are irrelevant at runtime */
5284 4572 [ + + ]: 1252 : if (rc->isParent)
4573 : 50 : continue;
4574 : :
4575 : : /* Find ExecRowMark and build ExecAuxRowMark */
3260 4576 : 1202 : erm = ExecFindRowMark(estate, rc->rti, false);
1110 4577 : 1202 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4578 : 1202 : arowmarks = lappend(arowmarks, aerm);
4579 : : }
4580 : :
4581 : : /* For a MERGE command, initialize its state */
748 alvherre@alvh.no-ip. 4582 [ + + ]: 57566 : if (mtstate->operation == CMD_MERGE)
4583 : 689 : ExecInitMerge(mtstate, estate);
4584 : :
1110 tgl@sss.pgh.pa.us 4585 : 57566 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4586 : :
4587 : : /*
4588 : : * If there are a lot of result relations, use a hash table to speed the
4589 : : * lookups. If there are not a lot, a simple linear search is faster.
4590 : : *
4591 : : * It's not clear where the threshold is, but try 64 for starters. In a
4592 : : * debugging build, use a small threshold so that we get some test
4593 : : * coverage of both code paths.
4594 : : */
4595 : : #ifdef USE_ASSERT_CHECKING
4596 : : #define MT_NRELS_HASH 4
4597 : : #else
4598 : : #define MT_NRELS_HASH 64
4599 : : #endif
4600 [ + + ]: 57566 : if (nrels >= MT_NRELS_HASH)
4601 : : {
4602 : : HASHCTL hash_ctl;
4603 : :
4604 : 158 : hash_ctl.keysize = sizeof(Oid);
4605 : 158 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4606 : 158 : hash_ctl.hcxt = CurrentMemoryContext;
4607 : 158 : mtstate->mt_resultOidHash =
4608 : 158 : hash_create("ModifyTable target hash",
4609 : : nrels, &hash_ctl,
4610 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4611 [ + + ]: 889 : for (i = 0; i < nrels; i++)
4612 : : {
4613 : : Oid hashkey;
4614 : : MTTargetRelLookup *mtlookup;
4615 : : bool found;
4616 : :
4617 : 731 : resultRelInfo = &mtstate->resultRelInfo[i];
4618 : 731 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4619 : : mtlookup = (MTTargetRelLookup *)
4620 : 731 : hash_search(mtstate->mt_resultOidHash, &hashkey,
4621 : : HASH_ENTER, &found);
4622 [ - + ]: 731 : Assert(!found);
4623 : 731 : mtlookup->relationIndex = i;
4624 : : }
4625 : : }
4626 : : else
4627 : 57408 : mtstate->mt_resultOidHash = NULL;
4628 : :
4629 : : /*
4630 : : * Determine if the FDW supports batch insert and determine the batch size
4631 : : * (a FDW may support batching, but it may be disabled for the
4632 : : * server/table).
4633 : : *
4634 : : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
4635 : : * remains set to 0.
4636 : : */
1179 tomas.vondra@postgre 4637 [ + + ]: 57566 : if (operation == CMD_INSERT)
4638 : : {
4639 : : /* insert may only have one relation, inheritance is not expanded */
1104 tgl@sss.pgh.pa.us 4640 [ - + ]: 44085 : Assert(nrels == 1);
1179 tomas.vondra@postgre 4641 : 44085 : resultRelInfo = mtstate->resultRelInfo;
1104 tgl@sss.pgh.pa.us 4642 [ + - ]: 44085 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4643 [ + + ]: 44085 : resultRelInfo->ri_FdwRoutine != NULL &&
4644 [ + - ]: 87 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
4645 [ + - ]: 87 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
4646 : : {
4647 : 87 : resultRelInfo->ri_BatchSize =
4648 : 87 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
1179 tomas.vondra@postgre 4649 [ - + ]: 87 : Assert(resultRelInfo->ri_BatchSize >= 1);
4650 : : }
4651 : : else
1104 tgl@sss.pgh.pa.us 4652 : 43998 : resultRelInfo->ri_BatchSize = 1;
4653 : : }
4654 : :
4655 : : /*
4656 : : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
4657 : : * to estate->es_auxmodifytables so that it will be run to completion by
4658 : : * ExecPostprocessPlan. (It'd actually work fine to add the primary
4659 : : * ModifyTable node too, but there's no need.) Note the use of lcons not
4660 : : * lappend: we need later-initialized ModifyTable nodes to be shut down
4661 : : * before earlier ones. This ensures that we don't throw away RETURNING
4662 : : * rows that need to be seen by a later CTE subplan.
4663 : : */
4797 4664 [ + + ]: 57566 : if (!mtstate->canSetTag)
4665 : 464 : estate->es_auxmodifytables = lcons(mtstate,
4666 : : estate->es_auxmodifytables);
4667 : :
5300 4668 : 57566 : return mtstate;
4669 : : }
4670 : :
4671 : : /* ----------------------------------------------------------------
4672 : : * ExecEndModifyTable
4673 : : *
4674 : : * Shuts down the plan.
4675 : : *
4676 : : * Returns nothing of interest.
4677 : : * ----------------------------------------------------------------
4678 : : */
4679 : : void
4680 : 55525 : ExecEndModifyTable(ModifyTableState *node)
4681 : : {
4682 : : int i;
4683 : :
4684 : : /*
4685 : : * Allow any FDWs to shut down
4686 : : */
1110 4687 [ + + ]: 112057 : for (i = 0; i < node->mt_nrels; i++)
4688 : : {
4689 : : int j;
4053 4690 : 56532 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
4691 : :
2949 rhaas@postgresql.org 4692 [ + + ]: 56532 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4693 [ + + ]: 56436 : resultRelInfo->ri_FdwRoutine != NULL &&
4053 tgl@sss.pgh.pa.us 4694 [ + - ]: 143 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
4695 : 143 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4696 : : resultRelInfo);
4697 : :
4698 : : /*
4699 : : * Cleanup the initialized batch slots. This only matters for FDWs
4700 : : * with batching, but the other cases will have ri_NumSlotsInitialized
4701 : : * == 0.
4702 : : */
1038 tomas.vondra@postgre 4703 [ + + ]: 56560 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4704 : : {
4705 : 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
4706 : 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
4707 : : }
4708 : : }
4709 : :
4710 : : /*
4711 : : * Close all the partitioned tables, leaf partitions, and their indices
4712 : : * and release the slot used for tuple routing, if set.
4713 : : */
2292 rhaas@postgresql.org 4714 [ + + ]: 55525 : if (node->mt_partition_tuple_routing)
4715 : : {
2200 4716 : 3376 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
4717 : :
1976 alvherre@alvh.no-ip. 4718 [ + + ]: 3376 : if (node->mt_root_tuple_slot)
4719 : 277 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
4720 : : }
4721 : :
4722 : : /*
4723 : : * Terminate EPQ execution if active
4724 : : */
5284 tgl@sss.pgh.pa.us 4725 : 55525 : EvalPlanQualEnd(&node->mt_epqstate);
4726 : :
4727 : : /*
4728 : : * shut down subplan
4729 : : */
1110 4730 : 55525 : ExecEndNode(outerPlanState(node));
5300 4731 : 55525 : }
4732 : :
void
ExecReScanModifyTable(ModifyTableState *node)
{
	/*
	 * Currently, we don't need to support rescan on ModifyTable nodes. The
	 * semantics of that would be a bit debatable anyway.  Reaching here
	 * indicates a planner/executor logic error, so report it as an internal
	 * error rather than silently doing nothing.
	 */
	elog(ERROR, "ExecReScanModifyTable is not implemented");
}
|