Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * execMain.c
4 : : * top level executor interface routines
5 : : *
6 : : * INTERFACE ROUTINES
7 : : * ExecutorStart()
8 : : * ExecutorRun()
9 : : * ExecutorFinish()
10 : : * ExecutorEnd()
11 : : *
12 : : * These four procedures are the external interface to the executor.
13 : : * In each case, the query descriptor is required as an argument.
14 : : *
15 : : * ExecutorStart must be called at the beginning of execution of any
16 : : * query plan and ExecutorEnd must always be called at the end of
17 : : * execution of a plan (unless it is aborted due to error).
18 : : *
19 : : * ExecutorRun accepts direction and count arguments that specify whether
20 : : * the plan is to be executed forwards, backwards, and for how many tuples.
21 : : * In some cases ExecutorRun may be called multiple times to process all
22 : : * the tuples for a plan. It is also acceptable to stop short of executing
23 : : * the whole plan (but only if it is a SELECT).
24 : : *
25 : : * ExecutorFinish must be called after the final ExecutorRun call and
26 : : * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
27 : : * which should also omit ExecutorRun.
28 : : *
29 : : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
30 : : * Portions Copyright (c) 1994, Regents of the University of California
31 : : *
32 : : *
33 : : * IDENTIFICATION
34 : : * src/backend/executor/execMain.c
35 : : *
36 : : *-------------------------------------------------------------------------
37 : : */
38 : : #include "postgres.h"
39 : :
40 : : #include "access/sysattr.h"
41 : : #include "access/table.h"
42 : : #include "access/tableam.h"
43 : : #include "access/xact.h"
44 : : #include "catalog/namespace.h"
45 : : #include "catalog/partition.h"
46 : : #include "commands/matview.h"
47 : : #include "commands/trigger.h"
48 : : #include "executor/executor.h"
49 : : #include "executor/nodeSubplan.h"
50 : : #include "foreign/fdwapi.h"
51 : : #include "mb/pg_wchar.h"
52 : : #include "miscadmin.h"
53 : : #include "parser/parse_relation.h"
54 : : #include "rewrite/rewriteHandler.h"
55 : : #include "tcop/utility.h"
56 : : #include "utils/acl.h"
57 : : #include "utils/backend_status.h"
58 : : #include "utils/lsyscache.h"
59 : : #include "utils/partcache.h"
60 : : #include "utils/rls.h"
61 : : #include "utils/snapmgr.h"
62 : :
63 : :
64 : : /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
65 : : ExecutorStart_hook_type ExecutorStart_hook = NULL;
66 : : ExecutorRun_hook_type ExecutorRun_hook = NULL;
67 : : ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
68 : : ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
69 : :
70 : : /* Hook for plugin to get control in ExecCheckPermissions() */
71 : : ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
72 : :
73 : : /* decls for local routines only used within this module */
74 : : static void InitPlan(QueryDesc *queryDesc, int eflags);
75 : : static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
76 : : static void ExecPostprocessPlan(EState *estate);
77 : : static void ExecEndPlan(PlanState *planstate, EState *estate);
78 : : static void ExecutePlan(EState *estate, PlanState *planstate,
79 : : bool use_parallel_mode,
80 : : CmdType operation,
81 : : bool sendTuples,
82 : : uint64 numberTuples,
83 : : ScanDirection direction,
84 : : DestReceiver *dest,
85 : : bool execute_once);
86 : : static bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo);
87 : : static bool ExecCheckPermissionsModified(Oid relOid, Oid userid,
88 : : Bitmapset *modifiedCols,
89 : : AclMode requiredPerms);
90 : : static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
91 : : static char *ExecBuildSlotValueDescription(Oid reloid,
92 : : TupleTableSlot *slot,
93 : : TupleDesc tupdesc,
94 : : Bitmapset *modifiedCols,
95 : : int maxfieldlen);
96 : : static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree);
97 : :
98 : : /* end of local decls */
99 : :
100 : :
101 : : /* ----------------------------------------------------------------
102 : : * ExecutorStart
103 : : *
104 : : * This routine must be called at the beginning of any execution of any
105 : : * query plan
106 : : *
107 : : * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
108 : : * only because some places use QueryDescs for utility commands). The tupDesc
109 : : * field of the QueryDesc is filled in to describe the tuples that will be
110 : : * returned, and the internal fields (estate and planstate) are set up.
111 : : *
112 : : * eflags contains flag bits as described in executor.h.
113 : : *
114 : : * NB: the CurrentMemoryContext when this is called will become the parent
115 : : * of the per-query context used for this Executor invocation.
116 : : *
117 : : * We provide a function hook variable that lets loadable plugins
118 : : * get control when ExecutorStart is called. Such a plugin would
119 : : * normally call standard_ExecutorStart().
120 : : *
121 : : * ----------------------------------------------------------------
122 : : */
123 : : void
6620 tgl@sss.pgh.pa.us 124 :CBC 302080 : ExecutorStart(QueryDesc *queryDesc, int eflags)
125 : : {
126 : : /*
127 : : * In some cases (e.g. an EXECUTE statement) a query execution will skip
128 : : * parse analysis, which means that the query_id won't be reported. Note
129 : : * that it's harmless to report the query_id multiple times, as the call
130 : : * will be ignored if the top level query_id has already been reported.
131 : : */
1090 bruce@momjian.us 132 : 302080 : pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);
133 : :
5625 tgl@sss.pgh.pa.us 134 [ + + ]: 302080 : if (ExecutorStart_hook)
135 : 47242 : (*ExecutorStart_hook) (queryDesc, eflags);
136 : : else
137 : 254838 : standard_ExecutorStart(queryDesc, eflags);
138 : 301117 : }
139 : :
140 : : void
141 : 302080 : standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
142 : : {
143 : : EState *estate;
144 : : MemoryContext oldcontext;
145 : :
146 : : /* sanity checks: queryDesc must not be started already */
9716 bruce@momjian.us 147 [ - + ]: 302080 : Assert(queryDesc != NULL);
7801 tgl@sss.pgh.pa.us 148 [ - + ]: 302080 : Assert(queryDesc->estate == NULL);
149 : :
150 : : /* caller must ensure the query's snapshot is active */
31 heikki.linnakangas@i 151 [ - + ]:GNC 302080 : Assert(GetActiveSnapshot() == queryDesc->snapshot);
152 : :
153 : : /*
154 : : * If the transaction is read-only, we need to check if any writes are
155 : : * planned to non-temporary tables. EXPLAIN is considered read-only.
156 : : *
157 : : * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
158 : : * would require (a) storing the combo CID hash in shared memory, rather
159 : : * than synchronizing it just once at the start of parallelism, and (b) an
160 : : * alternative to heap_update()'s reliance on xmax for mutual exclusion.
161 : : * INSERT may have no such troubles, but we forbid it to simplify the
162 : : * checks.
163 : : *
164 : : * We have lower-level defenses in CommandCounterIncrement and elsewhere
165 : : * against performing unsafe operations in parallel mode, but this gives a
166 : : * more user-friendly error message.
167 : : */
3272 rhaas@postgresql.org 168 [ + + + + ]:CBC 302080 : if ((XactReadOnly || IsInParallelMode()) &&
169 [ + - ]: 65876 : !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
6263 tgl@sss.pgh.pa.us 170 : 65876 : ExecCheckXactReadOnly(queryDesc->plannedstmt);
171 : :
172 : : /*
173 : : * Build EState, switch into per-query memory context for startup.
174 : : */
7801 175 : 302072 : estate = CreateExecutorState();
176 : 302072 : queryDesc->estate = estate;
177 : :
7791 178 : 302072 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
179 : :
180 : : /*
181 : : * Fill in external parameters, if any, from queryDesc; and allocate
182 : : * workspace for internal parameters
183 : : */
7801 184 : 302072 : estate->es_param_list_info = queryDesc->params;
185 : :
2344 rhaas@postgresql.org 186 [ + + ]: 302072 : if (queryDesc->plannedstmt->paramExecTypes != NIL)
187 : : {
188 : : int nParamExec;
189 : :
190 : 87894 : nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
9544 bruce@momjian.us 191 : 87894 : estate->es_param_exec_vals = (ParamExecData *)
2344 rhaas@postgresql.org 192 : 87894 : palloc0(nParamExec * sizeof(ParamExecData));
193 : : }
194 : :
195 : : /* We now require all callers to provide sourceText */
1095 tgl@sss.pgh.pa.us 196 [ - + ]: 302072 : Assert(queryDesc->sourceText != NULL);
2608 rhaas@postgresql.org 197 : 302072 : estate->es_sourceText = queryDesc->sourceText;
198 : :
199 : : /*
200 : : * Fill in the query environment, if any, from queryDesc.
201 : : */
2571 kgrittn@postgresql.o 202 : 302072 : estate->es_queryEnv = queryDesc->queryEnv;
203 : :
204 : : /*
205 : : * If non-read-only query, set the command ID to mark output tuples with
206 : : */
5980 tgl@sss.pgh.pa.us 207 [ + + - ]: 302072 : switch (queryDesc->operation)
208 : : {
209 : 245049 : case CMD_SELECT:
210 : :
211 : : /*
212 : : * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
213 : : * tuples
214 : : */
4409 215 [ + + ]: 245049 : if (queryDesc->plannedstmt->rowMarks != NIL ||
4797 216 [ + + ]: 241058 : queryDesc->plannedstmt->hasModifyingCTE)
5980 217 : 4058 : estate->es_output_cid = GetCurrentCommandId(true);
218 : :
219 : : /*
220 : : * A SELECT without modifying CTEs can't possibly queue triggers,
221 : : * so force skip-triggers mode. This is just a marginal efficiency
222 : : * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
223 : : * all that expensive, but we might as well do it.
224 : : */
4795 225 [ + + ]: 245049 : if (!queryDesc->plannedstmt->hasModifyingCTE)
226 : 244979 : eflags |= EXEC_FLAG_SKIP_TRIGGERS;
5980 227 : 245049 : break;
228 : :
229 : 57023 : case CMD_INSERT:
230 : : case CMD_DELETE:
231 : : case CMD_UPDATE:
232 : : case CMD_MERGE:
233 : 57023 : estate->es_output_cid = GetCurrentCommandId(true);
234 : 57023 : break;
235 : :
5980 tgl@sss.pgh.pa.us 236 :UBC 0 : default:
237 [ # # ]: 0 : elog(ERROR, "unrecognized operation code: %d",
238 : : (int) queryDesc->operation);
239 : : break;
240 : : }
241 : :
242 : : /*
243 : : * Copy other important information into the EState
244 : : */
5816 alvherre@alvh.no-ip. 245 :CBC 302072 : estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
246 : 302072 : estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
4795 tgl@sss.pgh.pa.us 247 : 302072 : estate->es_top_eflags = eflags;
5234 rhaas@postgresql.org 248 : 302072 : estate->es_instrument = queryDesc->instrument_options;
2212 tgl@sss.pgh.pa.us 249 : 302072 : estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;
250 : :
251 : : /*
252 : : * Set up an AFTER-trigger statement context, unless told not to, or
253 : : * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
254 : : */
4795 255 [ + + ]: 302072 : if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
256 : 56370 : AfterTriggerBeginQuery();
257 : :
258 : : /*
259 : : * Initialize the plan state tree
260 : : */
2402 261 : 302072 : InitPlan(queryDesc, eflags);
262 : :
7791 263 : 301117 : MemoryContextSwitchTo(oldcontext);
10141 scrappy@hub.org 264 : 301117 : }
265 : :
266 : : /* ----------------------------------------------------------------
267 : : * ExecutorRun
268 : : *
269 : : * This is the main routine of the executor module. It accepts
270 : : * the query descriptor from the traffic cop and executes the
271 : : * query plan.
272 : : *
273 : : * ExecutorStart must have been called already.
274 : : *
275 : : * If direction is NoMovementScanDirection then nothing is done
276 : : * except to start up/shut down the destination. Otherwise,
277 : : * we retrieve up to 'count' tuples in the specified direction.
278 : : *
279 : : * Note: count = 0 is interpreted as no portal limit, i.e., run to
280 : : * completion. Also note that the count limit is only applied to
281 : : * retrieved tuples, not for instance to those inserted/updated/deleted
282 : : * by a ModifyTable plan node.
283 : : *
284 : : * There is no return value, but output tuples (if any) are sent to
285 : : * the destination receiver specified in the QueryDesc; and the number
286 : : * of tuples processed at the top level can be found in
287 : : * estate->es_processed. The total number of tuples processed in all
288 : : * the ExecutorRun calls can be found in estate->es_total_processed.
289 : : *
290 : : * We provide a function hook variable that lets loadable plugins
291 : : * get control when ExecutorRun is called. Such a plugin would
292 : : * normally call standard_ExecutorRun().
293 : : *
294 : : * ----------------------------------------------------------------
295 : : */
296 : : void
7801 tgl@sss.pgh.pa.us 297 : 297728 : ExecutorRun(QueryDesc *queryDesc,
298 : : ScanDirection direction, uint64 count,
299 : : bool execute_once)
300 : : {
5749 301 [ + + ]: 297728 : if (ExecutorRun_hook)
2579 rhaas@postgresql.org 302 : 45942 : (*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
303 : : else
304 : 251786 : standard_ExecutorRun(queryDesc, direction, count, execute_once);
5749 tgl@sss.pgh.pa.us 305 : 286024 : }
306 : :
307 : : void
308 : 297728 : standard_ExecutorRun(QueryDesc *queryDesc,
309 : : ScanDirection direction, uint64 count, bool execute_once)
310 : : {
311 : : EState *estate;
312 : : CmdType operation;
313 : : DestReceiver *dest;
314 : : bool sendTuples;
315 : : MemoryContext oldcontext;
316 : :
317 : : /* sanity checks */
7791 318 [ - + ]: 297728 : Assert(queryDesc != NULL);
319 : :
320 : 297728 : estate = queryDesc->estate;
321 : :
322 [ - + ]: 297728 : Assert(estate != NULL);
4795 323 [ - + ]: 297728 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
324 : :
325 : : /* caller must ensure the query's snapshot is active */
31 heikki.linnakangas@i 326 [ - + ]:GNC 297728 : Assert(GetActiveSnapshot() == estate->es_snapshot);
327 : :
328 : : /*
329 : : * Switch into per-query memory context
330 : : */
7791 tgl@sss.pgh.pa.us 331 :CBC 297728 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
332 : :
333 : : /* Allow instrumentation of Executor overall runtime */
5625 334 [ + + ]: 297728 : if (queryDesc->totaltime)
335 : 29390 : InstrStartNode(queryDesc->totaltime);
336 : :
337 : : /*
338 : : * extract information from the query descriptor and the query feature.
339 : : */
9716 bruce@momjian.us 340 : 297728 : operation = queryDesc->operation;
341 : 297728 : dest = queryDesc->dest;
342 : :
343 : : /*
344 : : * startup tuple receiver, if we will be emitting tuples
345 : : */
8082 tgl@sss.pgh.pa.us 346 : 297728 : estate->es_processed = 0;
347 : :
6455 348 [ + + ]: 353734 : sendTuples = (operation == CMD_SELECT ||
5300 349 [ + + ]: 56006 : queryDesc->plannedstmt->hasReturning);
350 : :
6455 351 [ + + ]: 297728 : if (sendTuples)
2411 peter_e@gmx.net 352 : 243625 : dest->rStartup(dest, operation, queryDesc->tupDesc);
353 : :
354 : : /*
355 : : * run plan
356 : : */
5644 tgl@sss.pgh.pa.us 357 [ + + ]: 297707 : if (!ScanDirectionIsNoMovement(direction))
358 : : {
2579 rhaas@postgresql.org 359 [ + + - + ]: 297099 : if (execute_once && queryDesc->already_executed)
2579 rhaas@postgresql.org 360 [ # # ]:UBC 0 : elog(ERROR, "can't re-execute query flagged for single execution");
2579 rhaas@postgresql.org 361 :CBC 297099 : queryDesc->already_executed = true;
362 : :
5644 tgl@sss.pgh.pa.us 363 : 297099 : ExecutePlan(estate,
364 : : queryDesc->planstate,
3103 rhaas@postgresql.org 365 : 297099 : queryDesc->plannedstmt->parallelModeNeeded,
366 : : operation,
367 : : sendTuples,
368 : : count,
369 : : direction,
370 : : dest,
371 : : execute_once);
372 : : }
373 : :
374 : : /*
375 : : * Update es_total_processed to keep track of the number of tuples
376 : : * processed across multiple ExecutorRun() calls.
377 : : */
374 michael@paquier.xyz 378 : 286024 : estate->es_total_processed += estate->es_processed;
379 : :
380 : : /*
381 : : * shutdown tuple receiver, if we started it
382 : : */
6455 tgl@sss.pgh.pa.us 383 [ + + ]: 286024 : if (sendTuples)
2411 peter_e@gmx.net 384 : 233368 : dest->rShutdown(dest);
385 : :
5625 tgl@sss.pgh.pa.us 386 [ + + ]: 286024 : if (queryDesc->totaltime)
387 : 28166 : InstrStopNode(queryDesc->totaltime, estate->es_processed);
388 : :
7791 389 : 286024 : MemoryContextSwitchTo(oldcontext);
10141 scrappy@hub.org 390 : 286024 : }
391 : :
392 : : /* ----------------------------------------------------------------
393 : : * ExecutorFinish
394 : : *
395 : : * This routine must be called after the last ExecutorRun call.
396 : : * It performs cleanup such as firing AFTER triggers. It is
397 : : * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
398 : : * include these actions in the total runtime.
399 : : *
400 : : * We provide a function hook variable that lets loadable plugins
401 : : * get control when ExecutorFinish is called. Such a plugin would
402 : : * normally call standard_ExecutorFinish().
403 : : *
404 : : * ----------------------------------------------------------------
405 : : */
406 : : void
4795 tgl@sss.pgh.pa.us 407 : 279869 : ExecutorFinish(QueryDesc *queryDesc)
408 : : {
409 [ + + ]: 279869 : if (ExecutorFinish_hook)
410 : 40929 : (*ExecutorFinish_hook) (queryDesc);
411 : : else
412 : 238940 : standard_ExecutorFinish(queryDesc);
413 : 279345 : }
414 : :
415 : : void
416 : 279869 : standard_ExecutorFinish(QueryDesc *queryDesc)
417 : : {
418 : : EState *estate;
419 : : MemoryContext oldcontext;
420 : :
421 : : /* sanity checks */
422 [ - + ]: 279869 : Assert(queryDesc != NULL);
423 : :
424 : 279869 : estate = queryDesc->estate;
425 : :
426 [ - + ]: 279869 : Assert(estate != NULL);
427 [ - + ]: 279869 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
428 : :
429 : : /* This should be run once and only once per Executor instance */
430 [ - + ]: 279869 : Assert(!estate->es_finished);
431 : :
432 : : /* Switch into per-query memory context */
433 : 279869 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
434 : :
435 : : /* Allow instrumentation of Executor overall runtime */
436 [ + + ]: 279869 : if (queryDesc->totaltime)
437 : 28166 : InstrStartNode(queryDesc->totaltime);
438 : :
439 : : /* Run ModifyTable nodes to completion */
440 : 279869 : ExecPostprocessPlan(estate);
441 : :
442 : : /* Execute queued AFTER triggers, unless told not to */
443 [ + + ]: 279869 : if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
444 : 54255 : AfterTriggerEndQuery(estate);
445 : :
446 [ + + ]: 279345 : if (queryDesc->totaltime)
447 : 28012 : InstrStopNode(queryDesc->totaltime, 0);
448 : :
449 : 279345 : MemoryContextSwitchTo(oldcontext);
450 : :
451 : 279345 : estate->es_finished = true;
452 : 279345 : }
453 : :
454 : : /* ----------------------------------------------------------------
455 : : * ExecutorEnd
456 : : *
457 : : * This routine must be called at the end of execution of any
458 : : * query plan
459 : : *
460 : : * We provide a function hook variable that lets loadable plugins
461 : : * get control when ExecutorEnd is called. Such a plugin would
462 : : * normally call standard_ExecutorEnd().
463 : : *
464 : : * ----------------------------------------------------------------
465 : : */
466 : : void
7801 467 : 288737 : ExecutorEnd(QueryDesc *queryDesc)
468 : : {
5625 469 [ + + ]: 288737 : if (ExecutorEnd_hook)
470 : 43351 : (*ExecutorEnd_hook) (queryDesc);
471 : : else
472 : 245386 : standard_ExecutorEnd(queryDesc);
473 : 288737 : }
474 : :
475 : : void
476 : 288737 : standard_ExecutorEnd(QueryDesc *queryDesc)
477 : : {
478 : : EState *estate;
479 : : MemoryContext oldcontext;
480 : :
481 : : /* sanity checks */
9716 bruce@momjian.us 482 [ - + ]: 288737 : Assert(queryDesc != NULL);
483 : :
7801 tgl@sss.pgh.pa.us 484 : 288737 : estate = queryDesc->estate;
485 : :
7791 486 [ - + ]: 288737 : Assert(estate != NULL);
487 : :
488 : : /*
489 : : * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
490 : : * Assert is needed because ExecutorFinish is new as of 9.1, and callers
491 : : * might forget to call it.
492 : : */
4795 493 [ + + - + ]: 288737 : Assert(estate->es_finished ||
494 : : (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
495 : :
496 : : /*
497 : : * Switch into per-query memory context to run ExecEndPlan
498 : : */
7791 499 : 288737 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
500 : :
501 : 288737 : ExecEndPlan(queryDesc->planstate, estate);
502 : :
503 : : /* do away with our snapshots */
5816 alvherre@alvh.no-ip. 504 : 288737 : UnregisterSnapshot(estate->es_snapshot);
505 : 288737 : UnregisterSnapshot(estate->es_crosscheck_snapshot);
506 : :
507 : : /*
508 : : * Must switch out of context before destroying it
509 : : */
7791 tgl@sss.pgh.pa.us 510 : 288737 : MemoryContextSwitchTo(oldcontext);
511 : :
512 : : /*
513 : : * Release EState and per-query memory context. This should release
514 : : * everything the executor has allocated.
515 : : */
516 : 288737 : FreeExecutorState(estate);
517 : :
518 : : /* Reset queryDesc fields that no longer point to anything */
519 : 288737 : queryDesc->tupDesc = NULL;
520 : 288737 : queryDesc->estate = NULL;
521 : 288737 : queryDesc->planstate = NULL;
5625 522 : 288737 : queryDesc->totaltime = NULL;
8802 523 : 288737 : }
524 : :
525 : : /* ----------------------------------------------------------------
526 : : * ExecutorRewind
527 : : *
528 : : * This routine may be called on an open queryDesc to rewind it
529 : : * to the start.
530 : : * ----------------------------------------------------------------
531 : : */
532 : : void
7705 533 : 66 : ExecutorRewind(QueryDesc *queryDesc)
534 : : {
535 : : EState *estate;
536 : : MemoryContext oldcontext;
537 : :
538 : : /* sanity checks */
539 [ - + ]: 66 : Assert(queryDesc != NULL);
540 : :
541 : 66 : estate = queryDesc->estate;
542 : :
543 [ - + ]: 66 : Assert(estate != NULL);
544 : :
545 : : /* It's probably not sensible to rescan updating queries */
546 [ - + ]: 66 : Assert(queryDesc->operation == CMD_SELECT);
547 : :
548 : : /*
549 : : * Switch into per-query memory context
550 : : */
551 : 66 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
552 : :
553 : : /*
554 : : * rescan plan
555 : : */
5025 556 : 66 : ExecReScan(queryDesc->planstate);
557 : :
7705 558 : 66 : MemoryContextSwitchTo(oldcontext);
559 : 66 : }
560 : :
561 : :
562 : : /*
563 : : * ExecCheckPermissions
564 : : * Check access permissions of relations mentioned in a query
565 : : *
566 : : * Returns true if permissions are adequate. Otherwise, throws an appropriate
567 : : * error if ereport_on_violation is true, or simply returns false otherwise.
568 : : *
569 : : * Note that this does NOT address row-level security policies (aka: RLS). If
570 : : * rows will be returned to the user as a result of this permission check
571 : : * passing, then RLS also needs to be consulted (and check_enable_rls()).
572 : : *
573 : : * See rewrite/rowsecurity.c.
574 : : *
575 : : * NB: rangeTable is no longer used by us, but kept around for the hooks that
576 : : * might still want to look at the RTEs.
577 : : */
578 : : bool
495 alvherre@alvh.no-ip. 579 : 307119 : ExecCheckPermissions(List *rangeTable, List *rteperminfos,
580 : : bool ereport_on_violation)
581 : : {
582 : : ListCell *l;
5015 rhaas@postgresql.org 583 : 307119 : bool result = true;
584 : :
585 : : #ifdef USE_ASSERT_CHECKING
346 alvherre@alvh.no-ip. 586 : 307119 : Bitmapset *indexset = NULL;
587 : :
588 : : /* Check that rteperminfos is consistent with rangeTable */
589 [ + - + + : 846045 : foreach(l, rangeTable)
+ + ]
590 : : {
591 : 538926 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
592 : :
593 [ + + ]: 538926 : if (rte->perminfoindex != 0)
594 : : {
595 : : /* Sanity checks */
596 : :
597 : : /*
598 : : * Only relation RTEs and subquery RTEs that were once relation
599 : : * RTEs (views) have their perminfoindex set.
600 : : */
306 amitlan@postgresql.o 601 [ + + + - : 248770 : Assert(rte->rtekind == RTE_RELATION ||
- + ]
602 : : (rte->rtekind == RTE_SUBQUERY &&
603 : : rte->relkind == RELKIND_VIEW));
604 : :
346 alvherre@alvh.no-ip. 605 : 248770 : (void) getRTEPermissionInfo(rteperminfos, rte);
606 : : /* Many-to-one mapping not allowed */
607 [ - + ]: 248770 : Assert(!bms_is_member(rte->perminfoindex, indexset));
608 : 248770 : indexset = bms_add_member(indexset, rte->perminfoindex);
609 : : }
610 : : }
611 : :
612 : : /* All rteperminfos are referenced */
613 [ - + ]: 307119 : Assert(bms_num_members(indexset) == list_length(rteperminfos));
614 : : #endif
615 : :
495 616 [ + + + + : 554785 : foreach(l, rteperminfos)
+ + ]
617 : : {
618 : 248473 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
619 : :
620 [ - + ]: 248473 : Assert(OidIsValid(perminfo->relid));
621 : 248473 : result = ExecCheckOneRelPerms(perminfo);
5015 rhaas@postgresql.org 622 [ + + ]: 248473 : if (!result)
623 : : {
624 [ + + ]: 807 : if (ereport_on_violation)
495 alvherre@alvh.no-ip. 625 : 801 : aclcheck_error(ACLCHECK_NO_PRIV,
626 : 801 : get_relkind_objtype(get_rel_relkind(perminfo->relid)),
627 : 801 : get_rel_name(perminfo->relid));
5015 rhaas@postgresql.org 628 : 6 : return false;
629 : : }
630 : : }
631 : :
5028 632 [ + + ]: 306312 : if (ExecutorCheckPerms_hook)
495 alvherre@alvh.no-ip. 633 : 6 : result = (*ExecutorCheckPerms_hook) (rangeTable, rteperminfos,
634 : : ereport_on_violation);
5015 rhaas@postgresql.org 635 : 306312 : return result;
636 : : }
637 : :
638 : : /*
639 : : * ExecCheckOneRelPerms
640 : : * Check access permissions for a single relation.
641 : : */
642 : : static bool
495 alvherre@alvh.no-ip. 643 : 248473 : ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
644 : : {
645 : : AclMode requiredPerms;
646 : : AclMode relPerms;
647 : : AclMode remainingPerms;
648 : : Oid userid;
649 : 248473 : Oid relOid = perminfo->relid;
650 : :
651 : 248473 : requiredPerms = perminfo->requiredPerms;
652 [ - + ]: 248473 : Assert(requiredPerms != 0);
653 : :
654 : : /*
655 : : * userid to check as: current user unless we have a setuid indication.
656 : : *
657 : : * Note: GetUserId() is presently fast enough that there's no harm in
658 : : * calling it separately for each relation. If that stops being true, we
659 : : * could call it once in ExecCheckPermissions and pass the userid down
660 : : * from there. But for now, no need for the extra clutter.
661 : : */
662 : 496946 : userid = OidIsValid(perminfo->checkAsUser) ?
663 [ + + ]: 248473 : perminfo->checkAsUser : GetUserId();
664 : :
665 : : /*
666 : : * We must have *all* the requiredPerms bits, but some of the bits can be
667 : : * satisfied from column-level rather than relation-level permissions.
668 : : * First, remove any bits that are satisfied by relation permissions.
669 : : */
5561 tgl@sss.pgh.pa.us 670 : 248473 : relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
671 : 248473 : remainingPerms = requiredPerms & ~relPerms;
672 [ + + ]: 248473 : if (remainingPerms != 0)
673 : : {
3264 andres@anarazel.de 674 : 1258 : int col = -1;
675 : :
676 : : /*
677 : : * If we lack any permissions that exist only as relation permissions,
678 : : * we can fail straight away.
679 : : */
5561 tgl@sss.pgh.pa.us 680 [ + + ]: 1258 : if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
5015 rhaas@postgresql.org 681 : 75 : return false;
682 : :
683 : : /*
684 : : * Check to see if we have the needed privileges at column level.
685 : : *
686 : : * Note: failures just report a table-level error; it would be nicer
687 : : * to report a column-level error if we have some but not all of the
688 : : * column privileges.
689 : : */
5561 tgl@sss.pgh.pa.us 690 [ + + ]: 1183 : if (remainingPerms & ACL_SELECT)
691 : : {
692 : : /*
693 : : * When the query doesn't explicitly reference any columns (for
694 : : * example, SELECT COUNT(*) FROM table), allow the query if we
695 : : * have SELECT on any column of the rel, as per SQL spec.
696 : : */
495 alvherre@alvh.no-ip. 697 [ + + ]: 699 : if (bms_is_empty(perminfo->selectedCols))
698 : : {
5561 tgl@sss.pgh.pa.us 699 [ + + ]: 27 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
700 : : ACLMASK_ANY) != ACLCHECK_OK)
5015 rhaas@postgresql.org 701 : 6 : return false;
702 : : }
703 : :
495 alvherre@alvh.no-ip. 704 [ + + ]: 1132 : while ((col = bms_next_member(perminfo->selectedCols, col)) >= 0)
705 : : {
706 : : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
3425 tgl@sss.pgh.pa.us 707 : 885 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
708 : :
709 [ + + ]: 885 : if (attno == InvalidAttrNumber)
710 : : {
711 : : /* Whole-row reference, must have priv on all cols */
5561 712 [ + + ]: 27 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
713 : : ACLMASK_ALL) != ACLCHECK_OK)
5015 rhaas@postgresql.org 714 : 15 : return false;
715 : : }
716 : : else
717 : : {
3425 tgl@sss.pgh.pa.us 718 [ + + ]: 858 : if (pg_attribute_aclcheck(relOid, attno, userid,
719 : : ACL_SELECT) != ACLCHECK_OK)
5015 rhaas@postgresql.org 720 : 431 : return false;
721 : : }
722 : : }
723 : : }
724 : :
725 : : /*
726 : : * Basically the same for the mod columns, for both INSERT and UPDATE
727 : : * privilege as specified by remainingPerms.
728 : : */
495 alvherre@alvh.no-ip. 729 [ + + ]: 731 : if (remainingPerms & ACL_INSERT &&
730 [ + + ]: 154 : !ExecCheckPermissionsModified(relOid,
731 : : userid,
732 : : perminfo->insertedCols,
733 : : ACL_INSERT))
3264 andres@anarazel.de 734 : 88 : return false;
735 : :
495 alvherre@alvh.no-ip. 736 [ + + ]: 643 : if (remainingPerms & ACL_UPDATE &&
737 [ + + ]: 438 : !ExecCheckPermissionsModified(relOid,
738 : : userid,
739 : : perminfo->updatedCols,
740 : : ACL_UPDATE))
3264 andres@anarazel.de 741 : 192 : return false;
742 : : }
743 : 247666 : return true;
744 : : }
745 : :
746 : : /*
747 : : * ExecCheckPermissionsModified
748 : : * Check INSERT or UPDATE access permissions for a single relation (these
749 : : * are processed uniformly).
750 : : */
751 : : static bool
495 alvherre@alvh.no-ip. 752 : 592 : ExecCheckPermissionsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
753 : : AclMode requiredPerms)
754 : : {
3264 andres@anarazel.de 755 : 592 : int col = -1;
756 : :
757 : : /*
758 : : * When the query doesn't explicitly update any columns, allow the query
759 : : * if we have permission on any column of the rel. This is to handle
760 : : * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
761 : : */
762 [ + + ]: 592 : if (bms_is_empty(modifiedCols))
763 : : {
764 [ + - ]: 24 : if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
765 : : ACLMASK_ANY) != ACLCHECK_OK)
766 : 24 : return false;
767 : : }
768 : :
769 [ + + ]: 946 : while ((col = bms_next_member(modifiedCols, col)) >= 0)
770 : : {
771 : : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
772 : 634 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
773 : :
774 [ - + ]: 634 : if (attno == InvalidAttrNumber)
775 : : {
776 : : /* whole-row reference can't happen here */
3264 andres@anarazel.de 777 [ # # ]:UBC 0 : elog(ERROR, "whole-row update is not implemented");
778 : : }
779 : : else
780 : : {
3264 andres@anarazel.de 781 [ + + ]:CBC 634 : if (pg_attribute_aclcheck(relOid, attno, userid,
782 : : requiredPerms) != ACLCHECK_OK)
783 : 256 : return false;
784 : : }
785 : : }
5015 rhaas@postgresql.org 786 : 312 : return true;
787 : : }
788 : :
789 : : /*
790 : : * Check that the query does not imply any writes to non-temp tables;
791 : : * unless we're in parallel mode, in which case don't even allow writes
792 : : * to temp tables.
793 : : *
794 : : * Note: in a Hot Standby this would need to reject writes to temp
795 : : * tables just as we do in parallel mode; but an HS standby can't have created
796 : : * any temp tables in the first place, so no need to check that.
797 : : */
798 : : static void
5995 bruce@momjian.us 799 : 65876 : ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
800 : : {
801 : : ListCell *l;
802 : :
803 : : /*
804 : : * Fail if write permissions are requested in parallel mode for table
805 : : * (temp or non-temp), otherwise fail for any non-temp table.
806 : : */
495 alvherre@alvh.no-ip. 807 [ + + + + : 108569 : foreach(l, plannedstmt->permInfos)
+ + ]
808 : : {
809 : 42701 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
810 : :
811 [ + + ]: 42701 : if ((perminfo->requiredPerms & (~ACL_SELECT)) == 0)
7396 tgl@sss.pgh.pa.us 812 : 42687 : continue;
813 : :
495 alvherre@alvh.no-ip. 814 [ + + ]: 14 : if (isTempNamespace(get_rel_namespace(perminfo->relid)))
7396 tgl@sss.pgh.pa.us 815 : 6 : continue;
816 : :
1504 alvherre@alvh.no-ip. 817 : 8 : PreventCommandIfReadOnly(CreateCommandName((Node *) plannedstmt));
818 : : }
819 : :
3272 rhaas@postgresql.org 820 [ + + - + ]: 65868 : if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
1504 alvherre@alvh.no-ip. 821 : 6 : PreventCommandIfParallelMode(CreateCommandName((Node *) plannedstmt));
7765 peter_e@gmx.net 822 : 65868 : }
823 : :
824 : :
/* ----------------------------------------------------------------
 *		InitPlan
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
 *
 *		queryDesc holds the plan tree and an already-created EState;
 *		eflags is the bitmask of EXEC_FLAG_XXX options for this execution.
 *		On success, queryDesc->planstate and queryDesc->tupDesc are filled in.
 * ----------------------------------------------------------------
 */
static void
InitPlan(QueryDesc *queryDesc, int eflags)
{
	CmdType		operation = queryDesc->operation;
	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
	Plan	   *plan = plannedstmt->planTree;
	List	   *rangeTable = plannedstmt->rtable;
	EState	   *estate = queryDesc->estate;
	PlanState  *planstate;
	TupleDesc	tupType;
	ListCell   *l;
	int			i;

	/*
	 * Do permissions checks
	 */
	ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);

	/*
	 * initialize the node's execution state
	 */
	ExecInitRangeTable(estate, rangeTable, plannedstmt->permInfos);

	estate->es_plannedstmt = plannedstmt;

	/*
	 * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
	 * The array is indexed by range-table index (minus 1).
	 */
	if (plannedstmt->rowMarks)
	{
		estate->es_rowmarks = (ExecRowMark **)
			palloc0(estate->es_range_table_size * sizeof(ExecRowMark *));
		foreach(l, plannedstmt->rowMarks)
		{
			PlanRowMark *rc = (PlanRowMark *) lfirst(l);
			Oid			relid;
			Relation	relation;
			ExecRowMark *erm;

			/* ignore "parent" rowmarks; they are irrelevant at runtime */
			if (rc->isParent)
				continue;

			/* get relation's OID (will produce InvalidOid if subquery) */
			relid = exec_rt_fetch(rc->rti, estate)->relid;

			/* open relation, if we need to access it for this mark type */
			switch (rc->markType)
			{
				case ROW_MARK_EXCLUSIVE:
				case ROW_MARK_NOKEYEXCLUSIVE:
				case ROW_MARK_SHARE:
				case ROW_MARK_KEYSHARE:
				case ROW_MARK_REFERENCE:
					relation = ExecGetRangeTableRelation(estate, rc->rti);
					break;
				case ROW_MARK_COPY:
					/* no physical table access is required */
					relation = NULL;
					break;
				default:
					elog(ERROR, "unrecognized markType: %d", rc->markType);
					relation = NULL;	/* keep compiler quiet */
					break;
			}

			/* Check that relation is a legal target for marking */
			if (relation)
				CheckValidRowMarkRel(relation, rc->markType);

			/* Copy the PlanRowMark fields we need into an ExecRowMark */
			erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
			erm->relation = relation;
			erm->relid = relid;
			erm->rti = rc->rti;
			erm->prti = rc->prti;
			erm->rowmarkId = rc->rowmarkId;
			erm->markType = rc->markType;
			erm->strength = rc->strength;
			erm->waitPolicy = rc->waitPolicy;
			erm->ermActive = false;
			ItemPointerSetInvalid(&(erm->curCtid));
			erm->ermExtra = NULL;

			Assert(erm->rti > 0 && erm->rti <= estate->es_range_table_size &&
				   estate->es_rowmarks[erm->rti - 1] == NULL);

			estate->es_rowmarks[erm->rti - 1] = erm;
		}
	}

	/*
	 * Initialize the executor's tuple table to empty.
	 */
	estate->es_tupleTable = NIL;

	/* signal that this EState is not used for EPQ */
	estate->es_epq_active = NULL;

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.
	 */
	Assert(estate->es_subplanstates == NIL);
	i = 1;						/* subplan indices count from 1 */
	foreach(l, plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;
		int			sp_eflags;

		/*
		 * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
		 * it is a parameterless subplan (not initplan), we suggest that it be
		 * prepared to handle REWIND efficiently; otherwise there is no need.
		 */
		sp_eflags = eflags
			& ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
		if (bms_is_member(i, plannedstmt->rewindPlanIDs))
			sp_eflags |= EXEC_FLAG_REWIND;

		subplanstate = ExecInitNode(subplan, estate, sp_eflags);

		estate->es_subplanstates = lappend(estate->es_subplanstates,
										   subplanstate);

		i++;
	}

	/*
	 * Initialize the private state information for all the nodes in the query
	 * tree.  This opens files, allocates storage and leaves us ready to start
	 * processing tuples.
	 */
	planstate = ExecInitNode(plan, estate, eflags);

	/*
	 * Get the tuple descriptor describing the type of tuples to return.
	 */
	tupType = ExecGetResultType(planstate);

	/*
	 * Initialize the junk filter if needed.  SELECT queries need a filter if
	 * there are any junk attrs in the top-level tlist.
	 */
	if (operation == CMD_SELECT)
	{
		bool		junk_filter_needed = false;
		ListCell   *tlist;

		foreach(tlist, plan->targetlist)
		{
			TargetEntry *tle = (TargetEntry *) lfirst(tlist);

			if (tle->resjunk)
			{
				junk_filter_needed = true;
				break;
			}
		}

		if (junk_filter_needed)
		{
			JunkFilter *j;
			TupleTableSlot *slot;

			slot = ExecInitExtraTupleSlot(estate, NULL, &TTSOpsVirtual);
			j = ExecInitJunkFilter(planstate->plan->targetlist,
								   slot);
			estate->es_junkFilter = j;

			/* Want to return the cleaned tuple type */
			tupType = j->jf_cleanTupType;
		}
	}

	queryDesc->tupDesc = tupType;
	queryDesc->planstate = planstate;
}
1011 : :
/*
 * Check that a proposed result relation is a legal target for the operation
 *
 * Generally the parser and/or planner should have noticed any such mistake
 * already, but let's make sure.
 *
 * For MERGE, mergeActions is the list of actions that may be performed.  The
 * result relation is required to support every action, regardless of whether
 * or not they are all executed.
 *
 * Errors out (via ereport) if the relation cannot be a target; returns
 * silently if it can.
 *
 * Note: when changing this function, you probably also need to look at
 * CheckValidRowMarkRel.
 */
void
CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation,
					List *mergeActions)
{
	Relation	resultRel = resultRelInfo->ri_RelationDesc;
	FdwRoutine *fdwroutine;

	switch (resultRel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* ordinary tables are OK, given a suitable replica identity */
			CheckCmdReplicaIdentity(resultRel, operation);
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_VIEW:

			/*
			 * Okay only if there's a suitable INSTEAD OF trigger.  Otherwise,
			 * complain, but omit errdetail because we haven't got the
			 * information handy (and given that it really shouldn't happen,
			 * it's not worth great exertion to get).
			 */
			if (!view_has_instead_trigger(resultRel, operation, mergeActions))
				error_view_not_updatable(resultRel, operation, mergeActions,
										 NULL);
			break;
		case RELKIND_MATVIEW:
			/* matviews are writable only during REFRESH processing */
			if (!MatViewIncrementalMaintenanceIsEnabled())
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot change materialized view \"%s\"",
								RelationGetRelationName(resultRel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it */
			fdwroutine = resultRelInfo->ri_FdwRoutine;
			switch (operation)
			{
				case CMD_INSERT:
					if (fdwroutine->ExecForeignInsert == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot insert into foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					/* also let the FDW veto it via its updatability mask */
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow inserts",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_UPDATE:
					if (fdwroutine->ExecForeignUpdate == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot update foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow updates",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_DELETE:
					if (fdwroutine->ExecForeignDelete == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot delete from foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow deletes",
										RelationGetRelationName(resultRel))));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		default:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
	}
}
1126 : :
1127 : : /*
1128 : : * Check that a proposed rowmark target relation is a legal target
1129 : : *
1130 : : * In most cases parser and/or planner should have noticed this already, but
1131 : : * they don't cover all cases.
1132 : : */
1133 : : static void
4700 1134 : 5055 : CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1135 : : {
1136 : : FdwRoutine *fdwroutine;
1137 : :
1138 [ + - - - : 5055 : switch (rel->rd_rel->relkind)
+ - - ]
1139 : : {
1140 : 5049 : case RELKIND_RELATION:
1141 : : case RELKIND_PARTITIONED_TABLE:
1142 : : /* OK */
1143 : 5049 : break;
4700 tgl@sss.pgh.pa.us 1144 :UBC 0 : case RELKIND_SEQUENCE:
1145 : : /* Must disallow this because we don't vacuum sequences */
1146 [ # # ]: 0 : ereport(ERROR,
1147 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1148 : : errmsg("cannot lock rows in sequence \"%s\"",
1149 : : RelationGetRelationName(rel))));
1150 : : break;
1151 : 0 : case RELKIND_TOASTVALUE:
1152 : : /* We could allow this, but there seems no good reason to */
1153 [ # # ]: 0 : ereport(ERROR,
1154 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1155 : : errmsg("cannot lock rows in TOAST relation \"%s\"",
1156 : : RelationGetRelationName(rel))));
1157 : : break;
1158 : 0 : case RELKIND_VIEW:
1159 : : /* Should not get here; planner should have expanded the view */
1160 [ # # ]: 0 : ereport(ERROR,
1161 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1162 : : errmsg("cannot lock rows in view \"%s\"",
1163 : : RelationGetRelationName(rel))));
1164 : : break;
4060 kgrittn@postgresql.o 1165 :CBC 6 : case RELKIND_MATVIEW:
1166 : : /* Allow referencing a matview, but not actual locking clauses */
3692 tgl@sss.pgh.pa.us 1167 [ + + ]: 6 : if (markType != ROW_MARK_REFERENCE)
1168 [ + - ]: 3 : ereport(ERROR,
1169 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1170 : : errmsg("cannot lock rows in materialized view \"%s\"",
1171 : : RelationGetRelationName(rel))));
4060 kgrittn@postgresql.o 1172 : 3 : break;
4700 tgl@sss.pgh.pa.us 1173 :UBC 0 : case RELKIND_FOREIGN_TABLE:
1174 : : /* Okay only if the FDW supports it */
3260 1175 : 0 : fdwroutine = GetFdwRoutineForRelation(rel, false);
1176 [ # # ]: 0 : if (fdwroutine->RefetchForeignRow == NULL)
1177 [ # # ]: 0 : ereport(ERROR,
1178 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1179 : : errmsg("cannot lock rows in foreign table \"%s\"",
1180 : : RelationGetRelationName(rel))));
4700 1181 : 0 : break;
1182 : 0 : default:
1183 [ # # ]: 0 : ereport(ERROR,
1184 : : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1185 : : errmsg("cannot lock rows in relation \"%s\"",
1186 : : RelationGetRelationName(rel))));
1187 : : break;
1188 : : }
4700 tgl@sss.pgh.pa.us 1189 :CBC 5052 : }
1190 : :
/*
 * Initialize ResultRelInfo data for one result relation
 *
 * resultRelationDesc is the already-opened target relation;
 * resultRelationIndex is its range-table index (0 for "dummy" entries made
 * for triggers or tuple routing); partition_root_rri, if not NULL, is the
 * ResultRelInfo for the partitioned table the query actually named.
 *
 * Caution: before Postgres 9.1, this function included the relkind checking
 * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
 * appropriate.  Be sure callers cover those needs.
 */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  ResultRelInfo *partition_root_rri,
				  int instrument_options)
{
	/* Start from all-zeroes, then set each field explicitly below */
	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		/* per-trigger lookup caches, filled lazily when triggers fire */
		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		resultRelInfo->ri_TrigWhenExprs = (ExprState **)
			palloc0(n * sizeof(ExprState *));
		if (instrument_options)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options, false);
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigWhenExprs = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
	else
		resultRelInfo->ri_FdwRoutine = NULL;

	/* The following fields are set later if needed */
	resultRelInfo->ri_RowIdAttNo = 0;
	resultRelInfo->ri_extraUpdatedCols = NULL;
	resultRelInfo->ri_projectNew = NULL;
	resultRelInfo->ri_newTupleSlot = NULL;
	resultRelInfo->ri_oldTupleSlot = NULL;
	resultRelInfo->ri_projectNewInfoValid = false;
	resultRelInfo->ri_FdwState = NULL;
	resultRelInfo->ri_usesFdwDirectModify = false;
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_GeneratedExprsI = NULL;
	resultRelInfo->ri_GeneratedExprsU = NULL;
	resultRelInfo->ri_projectReturning = NULL;
	resultRelInfo->ri_onConflictArbiterIndexes = NIL;
	resultRelInfo->ri_onConflict = NULL;
	resultRelInfo->ri_ReturningSlot = NULL;
	resultRelInfo->ri_TrigOldSlot = NULL;
	resultRelInfo->ri_TrigNewSlot = NULL;
	resultRelInfo->ri_MergeActions[MERGE_WHEN_MATCHED] = NIL;
	resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] = NIL;
	resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET] = NIL;
	resultRelInfo->ri_MergeJoinCondition = NULL;

	/*
	 * Only ExecInitPartitionInfo() and ExecInitPartitionDispatchInfo() pass
	 * non-NULL partition_root_rri.  For child relations that are part of the
	 * initial query rather than being dynamically added by tuple routing,
	 * this field is filled in ExecInitModifyTable().
	 */
	resultRelInfo->ri_RootResultRelInfo = partition_root_rri;
	/* Set by ExecGetRootToChildMap */
	resultRelInfo->ri_RootToChildMap = NULL;
	resultRelInfo->ri_RootToChildMapValid = false;
	/* Set by ExecInitRoutingInfo */
	resultRelInfo->ri_PartitionTupleSlot = NULL;
	resultRelInfo->ri_ChildToRootMap = NULL;
	resultRelInfo->ri_ChildToRootMapValid = false;
	resultRelInfo->ri_CopyMultiInsertBuffer = NULL;
}
1275 : :
1276 : : /*
1277 : : * ExecGetTriggerResultRel
1278 : : * Get a ResultRelInfo for a trigger target relation.
1279 : : *
1280 : : * Most of the time, triggers are fired on one of the result relations of the
1281 : : * query, and so we can just return a member of the es_result_relations array,
1282 : : * or the es_tuple_routing_result_relations list (if any). (Note: in self-join
1283 : : * situations there might be multiple members with the same OID; if so it
1284 : : * doesn't matter which one we pick.)
1285 : : *
1286 : : * However, it is sometimes necessary to fire triggers on other relations;
1287 : : * this happens mainly when an RI update trigger queues additional triggers
1288 : : * on other relations, which will be processed in the context of the outer
1289 : : * query. For efficiency's sake, we want to have a ResultRelInfo for those
1290 : : * triggers too; that can avoid repeated re-opening of the relation. (It
1291 : : * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1292 : : * triggers.) So we make additional ResultRelInfo's as needed, and save them
1293 : : * in es_trig_target_relations.
1294 : : */
1295 : : ResultRelInfo *
756 alvherre@alvh.no-ip. 1296 : 4066 : ExecGetTriggerResultRel(EState *estate, Oid relid,
1297 : : ResultRelInfo *rootRelInfo)
1298 : : {
1299 : : ResultRelInfo *rInfo;
1300 : : ListCell *l;
1301 : : Relation rel;
1302 : : MemoryContext oldcontext;
1303 : :
1304 : : /* Search through the query result relations */
1279 heikki.linnakangas@i 1305 [ + + + + : 5150 : foreach(l, estate->es_opened_result_relations)
+ + ]
1306 : : {
1307 : 4454 : rInfo = lfirst(l);
6087 tgl@sss.pgh.pa.us 1308 [ + + ]: 4454 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1309 : 3370 : return rInfo;
1310 : : }
1311 : :
1312 : : /*
1313 : : * Search through the result relations that were created during tuple
1314 : : * routing, if any.
1315 : : */
2257 rhaas@postgresql.org 1316 [ + + + + : 800 : foreach(l, estate->es_tuple_routing_result_relations)
+ + ]
1317 : : {
2431 1318 : 434 : rInfo = (ResultRelInfo *) lfirst(l);
1319 [ + + ]: 434 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1320 : 330 : return rInfo;
1321 : : }
1322 : :
1323 : : /* Nope, but maybe we already made an extra ResultRelInfo for it */
6087 tgl@sss.pgh.pa.us 1324 [ + + + + : 537 : foreach(l, estate->es_trig_target_relations)
+ + ]
1325 : : {
1326 : 186 : rInfo = (ResultRelInfo *) lfirst(l);
1327 [ + + ]: 186 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1328 : 15 : return rInfo;
1329 : : }
1330 : : /* Nope, so we need a new one */
1331 : :
1332 : : /*
1333 : : * Open the target relation's relcache entry. We assume that an
1334 : : * appropriate lock is still held by the backend from whenever the trigger
1335 : : * event got queued, so we need take no new lock here. Also, we need not
1336 : : * recheck the relkind, so no need for CheckValidResultRel.
1337 : : */
1910 andres@anarazel.de 1338 : 351 : rel = table_open(relid, NoLock);
1339 : :
1340 : : /*
1341 : : * Make the new entry in the right context.
1342 : : */
6087 tgl@sss.pgh.pa.us 1343 : 351 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1344 : 351 : rInfo = makeNode(ResultRelInfo);
5861 1345 : 351 : InitResultRelInfo(rInfo,
1346 : : rel,
1347 : : 0, /* dummy rangetable index */
1348 : : rootRelInfo,
1349 : : estate->es_instrument);
6087 1350 : 351 : estate->es_trig_target_relations =
1351 : 351 : lappend(estate->es_trig_target_relations, rInfo);
1352 : 351 : MemoryContextSwitchTo(oldcontext);
1353 : :
1354 : : /*
1355 : : * Currently, we don't need any index information in ResultRelInfos used
1356 : : * only for triggers, so no need to call ExecOpenIndices.
1357 : : */
1358 : :
1359 : 351 : return rInfo;
1360 : : }
1361 : :
/*
 * Return the ancestor relations of a given leaf partition result relation
 * up to and including the query's root target relation.
 *
 * These work much like the ones opened by ExecGetTriggerResultRel, except
 * that we need to keep them in a separate list.
 *
 * The result is cached in resultRelInfo->ri_ancestorResultRels on first
 * call, so repeated calls for the same leaf partition are cheap.
 *
 * These are closed by ExecCloseResultRelations.
 */
List *
ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo)
{
	ResultRelInfo *rootRelInfo = resultRelInfo->ri_RootResultRelInfo;
	Relation	partRel = resultRelInfo->ri_RelationDesc;
	Oid			rootRelOid;

	if (!partRel->rd_rel->relispartition)
		elog(ERROR, "cannot find ancestors of a non-partition result relation");
	Assert(rootRelInfo != NULL);
	rootRelOid = RelationGetRelid(rootRelInfo->ri_RelationDesc);
	if (resultRelInfo->ri_ancestorResultRels == NIL)
	{
		ListCell   *lc;
		List	   *oids = get_partition_ancestors(RelationGetRelid(partRel));
		List	   *ancResultRels = NIL;

		foreach(lc, oids)
		{
			Oid			ancOid = lfirst_oid(lc);
			Relation	ancRel;
			ResultRelInfo *rInfo;

			/*
			 * Ignore the root ancestor here, and use ri_RootResultRelInfo
			 * (below) for it instead.  Also, we stop climbing up the
			 * hierarchy when we find the table that was mentioned in the
			 * query.
			 */
			if (ancOid == rootRelOid)
				break;

			/*
			 * All ancestors up to the root target relation must have been
			 * locked by the planner or AcquireExecutorLocks().
			 */
			ancRel = table_open(ancOid, NoLock);
			rInfo = makeNode(ResultRelInfo);

			/* dummy rangetable index */
			InitResultRelInfo(rInfo, ancRel, 0, NULL,
							  estate->es_instrument);
			ancResultRels = lappend(ancResultRels, rInfo);
		}
		/* the root target relation itself always terminates the list */
		ancResultRels = lappend(ancResultRels, rootRelInfo);
		resultRelInfo->ri_ancestorResultRels = ancResultRels;
	}

	/* We must have found some ancestor */
	Assert(resultRelInfo->ri_ancestorResultRels != NIL);

	return resultRelInfo->ri_ancestorResultRels;
}
1424 : :
1425 : : /* ----------------------------------------------------------------
1426 : : * ExecPostprocessPlan
1427 : : *
1428 : : * Give plan nodes a final chance to execute before shutdown
1429 : : * ----------------------------------------------------------------
1430 : : */
1431 : : static void
4797 tgl@sss.pgh.pa.us 1432 : 279869 : ExecPostprocessPlan(EState *estate)
1433 : : {
1434 : : ListCell *lc;
1435 : :
1436 : : /*
1437 : : * Make sure nodes run forward.
1438 : : */
1439 : 279869 : estate->es_direction = ForwardScanDirection;
1440 : :
1441 : : /*
1442 : : * Run any secondary ModifyTable nodes to completion, in case the main
1443 : : * query did not fetch all rows from them. (We do this to ensure that
1444 : : * such nodes have predictable results.)
1445 : : */
1446 [ + + + + : 280290 : foreach(lc, estate->es_auxmodifytables)
+ + ]
1447 : : {
4753 bruce@momjian.us 1448 : 421 : PlanState *ps = (PlanState *) lfirst(lc);
1449 : :
1450 : : for (;;)
4797 tgl@sss.pgh.pa.us 1451 : 69 : {
1452 : : TupleTableSlot *slot;
1453 : :
1454 : : /* Reset the per-output-tuple exprcontext each time */
1455 [ + + ]: 490 : ResetPerTupleExprContext(estate);
1456 : :
1457 : 490 : slot = ExecProcNode(ps);
1458 : :
1459 [ + + + - ]: 490 : if (TupIsNull(slot))
1460 : : break;
1461 : : }
1462 : : }
1463 : 279869 : }
1464 : :
1465 : : /* ----------------------------------------------------------------
1466 : : * ExecEndPlan
1467 : : *
1468 : : * Cleans up the query plan -- closes files and frees up storage
1469 : : *
1470 : : * NOTE: we are no longer very worried about freeing storage per se
1471 : : * in this code; FreeExecutorState should be guaranteed to release all
1472 : : * memory that needs to be released. What we are worried about doing
1473 : : * is closing relations and dropping buffer pins. Thus, for example,
1474 : : * tuple tables must be cleared or dropped to ensure pins are released.
1475 : : * ----------------------------------------------------------------
1476 : : */
1477 : : static void
7555 bruce@momjian.us 1478 : 288737 : ExecEndPlan(PlanState *planstate, EState *estate)
1479 : : {
1480 : : ListCell *l;
1481 : :
1482 : : /*
1483 : : * shut down the node-type-specific query processing
1484 : : */
7801 tgl@sss.pgh.pa.us 1485 : 288737 : ExecEndNode(planstate);
1486 : :
1487 : : /*
1488 : : * for subplans too
1489 : : */
6256 1490 [ + + + + : 307572 : foreach(l, estate->es_subplanstates)
+ + ]
1491 : : {
5995 bruce@momjian.us 1492 : 18835 : PlanState *subplanstate = (PlanState *) lfirst(l);
1493 : :
6256 tgl@sss.pgh.pa.us 1494 : 18835 : ExecEndNode(subplanstate);
1495 : : }
1496 : :
1497 : : /*
1498 : : * destroy the executor's tuple table. Actually we only care about
1499 : : * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1500 : : * the TupleTableSlots, since the containing memory context is about to go
1501 : : * away anyway.
1502 : : */
5313 1503 : 288737 : ExecResetTupleTable(estate->es_tupleTable, false);
1504 : :
1505 : : /*
1506 : : * Close any Relations that have been opened for range table entries or
1507 : : * result relations.
1508 : : */
1279 heikki.linnakangas@i 1509 : 288737 : ExecCloseResultRelations(estate);
1510 : 288737 : ExecCloseRangeTableRelations(estate);
1511 : 288737 : }
1512 : :
1513 : : /*
1514 : : * Close any relations that have been opened for ResultRelInfos.
1515 : : */
1516 : : void
1517 : 289598 : ExecCloseResultRelations(EState *estate)
1518 : : {
1519 : : ListCell *l;
1520 : :
1521 : : /*
1522 : : * close indexes of result relation(s) if any. (Rels themselves are
1523 : : * closed in ExecCloseRangeTableRelations())
1524 : : *
1525 : : * In addition, close the stub RTs that may be in each resultrel's
1526 : : * ri_ancestorResultRels.
1527 : : */
1528 [ + + + + : 346825 : foreach(l, estate->es_opened_result_relations)
+ + ]
1529 : : {
1530 : 57227 : ResultRelInfo *resultRelInfo = lfirst(l);
1531 : : ListCell *lc;
1532 : :
8554 tgl@sss.pgh.pa.us 1533 : 57227 : ExecCloseIndices(resultRelInfo);
756 alvherre@alvh.no-ip. 1534 [ + + + + : 57347 : foreach(lc, resultRelInfo->ri_ancestorResultRels)
+ + ]
1535 : : {
1536 : 120 : ResultRelInfo *rInfo = lfirst(lc);
1537 : :
1538 : : /*
1539 : : * Ancestors with RTI > 0 (should only be the root ancestor) are
1540 : : * closed by ExecCloseRangeTableRelations.
1541 : : */
1542 [ + + ]: 120 : if (rInfo->ri_RangeTableIndex > 0)
1543 : 96 : continue;
1544 : :
1545 : 24 : table_close(rInfo->ri_RelationDesc, NoLock);
1546 : : }
1547 : : }
1548 : :
1549 : : /* Close any relations that have been opened by ExecGetTriggerResultRel(). */
1279 heikki.linnakangas@i 1550 [ + + + + : 289843 : foreach(l, estate->es_trig_target_relations)
+ + ]
1551 : : {
1552 : 245 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1553 : :
1554 : : /*
1555 : : * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we
1556 : : * might be issuing a duplicate close against a Relation opened by
1557 : : * ExecGetRangeTableRelation.
1558 : : */
1559 [ - + ]: 245 : Assert(resultRelInfo->ri_RangeTableIndex == 0);
1560 : :
1561 : : /*
1562 : : * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for
1563 : : * these rels, we needn't call ExecCloseIndices either.
1564 : : */
1565 [ - + ]: 245 : Assert(resultRelInfo->ri_NumIndices == 0);
1566 : :
1567 : 245 : table_close(resultRelInfo->ri_RelationDesc, NoLock);
1568 : : }
1569 : 289598 : }
1570 : :
1571 : : /*
1572 : : * Close all relations opened by ExecGetRangeTableRelation().
1573 : : *
1574 : : * We do not release any locks we might hold on those rels.
1575 : : */
1576 : : void
1577 : 289409 : ExecCloseRangeTableRelations(EState *estate)
1578 : : {
1579 : : int i;
1580 : :
1581 [ + + ]: 805720 : for (i = 0; i < estate->es_range_table_size; i++)
1582 : : {
2019 tgl@sss.pgh.pa.us 1583 [ + + ]: 516311 : if (estate->es_relations[i])
1910 andres@anarazel.de 1584 : 231585 : table_close(estate->es_relations[i], NoLock);
1585 : : }
10141 scrappy@hub.org 1586 : 289409 : }
1587 : :
/* ----------------------------------------------------------------
 *		ExecutePlan
 *
 *		Processes the query plan until we have retrieved 'numberTuples' tuples,
 *		moving in the specified direction.
 *
 *		Runs to completion if numberTuples is 0
 *
 * Note: the ctid attribute is a 'junk' attribute that is removed before the
 * user can see it
 * ----------------------------------------------------------------
 */
static void
ExecutePlan(EState *estate,
			PlanState *planstate,
			bool use_parallel_mode,
			CmdType operation,
			bool sendTuples,
			uint64 numberTuples,
			ScanDirection direction,
			DestReceiver *dest,
			bool execute_once)
{
	TupleTableSlot *slot;
	uint64		current_tuple_count;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * If the plan might potentially be executed multiple times, we must force
	 * it to run without parallelism, because we might exit early.
	 */
	if (!execute_once)
		use_parallel_mode = false;

	/* Record the decision so plan nodes can see it, then enter the mode. */
	estate->es_use_parallel_mode = use_parallel_mode;
	if (use_parallel_mode)
		EnterParallelMode();

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */
	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple
		 */
		slot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just end the loop...
		 */
		if (TupIsNull(slot))
			break;

		/*
		 * If we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 */
		if (estate->es_junkFilter != NULL)
			slot = ExecFilterJunk(estate->es_junkFilter, slot);

		/*
		 * If we are supposed to send the tuple somewhere, do so. (In
		 * practice, this is probably always the case at this point.)
		 */
		if (sendTuples)
		{
			/*
			 * If we are not able to send the tuple, we assume the destination
			 * has closed and no more tuples can be sent. If that's the case,
			 * end the loop.
			 */
			if (!dest->receiveSlot(slot, dest))
				break;
		}

		/*
		 * Count tuples processed, if this is a SELECT.  (For other operation
		 * types, the ModifyTable plan node must count the appropriate
		 * events.)
		 */
		if (operation == CMD_SELECT)
			(estate->es_processed)++;

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples.  Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * If we know we won't need to back up, we can release resources at this
	 * point.  (If EXEC_FLAG_BACKWARD was requested, the caller may rescan
	 * backwards later, so keep the plan tree's resources alive.)
	 */
	if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD))
		ExecShutdownNode(planstate);

	/* Balance the EnterParallelMode() call made above, if any. */
	if (use_parallel_mode)
		ExitParallelMode();
}
1709 : :
1710 : :
/*
 * ExecRelCheck --- check that tuple meets constraints for result relation
 *
 * Returns NULL if OK, else name of failed check constraint
 *
 * The compiled constraint expressions are cached in the ResultRelInfo on
 * first use, in the per-query memory context, so repeated calls for the
 * same result relation don't re-parse the constraint trees.
 */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	int			i;

	/*
	 * CheckConstraintFetch let this pass with only a warning, but now we
	 * should fail rather than possibly failing to enforce an important
	 * constraint.
	 */
	if (ncheck != rel->rd_rel->relchecks)
		elog(ERROR, "%d pg_constraint record(s) missing for relation \"%s\"",
			 rel->rd_rel->relchecks - ncheck, RelationGetRelationName(rel));

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
	 * memory context so they'll survive throughout the query.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(ExprState **) palloc(ncheck * sizeof(ExprState *));
		for (i = 0; i < ncheck; i++)
		{
			Expr	   *checkconstr;

			/* ccbin holds the nodeToString form of the check expression */
			checkconstr = stringToNode(check[i].ccbin);
			resultRelInfo->ri_ConstraintExprs[i] =
				ExecPrepareExpr(checkconstr, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		ExprState  *checkconstr = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL specifies that a NULL result from a constraint expression
		 * is not to be treated as a failure.  Therefore, use ExecCheck not
		 * ExecQual.
		 */
		if (!ExecCheck(checkconstr, econtext))
			return check[i].ccname;
	}

	/* NULL result means no error */
	return NULL;
}
1783 : :
1784 : : /*
1785 : : * ExecPartitionCheck --- check that tuple meets the partition constraint.
1786 : : *
1787 : : * Returns true if it meets the partition constraint. If the constraint
1788 : : * fails and we're asked to emit an error, do so and don't return; otherwise
1789 : : * return false.
1790 : : */
1791 : : bool
2685 rhaas@postgresql.org 1792 : 6748 : ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1793 : : EState *estate, bool emitError)
1794 : : {
1795 : : ExprContext *econtext;
1796 : : bool success;
1797 : :
1798 : : /*
1799 : : * If first time through, build expression state tree for the partition
1800 : : * check expression. (In the corner case where the partition check
1801 : : * expression is empty, ie there's a default partition and nothing else,
1802 : : * we'll be fooled into executing this code each time through. But it's
1803 : : * pretty darn cheap in that case, so we don't worry about it.)
1804 : : */
1805 [ + + ]: 6748 : if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1806 : : {
1807 : : /*
1808 : : * Ensure that the qual tree and prepared expression are in the
1809 : : * query-lifespan context.
1810 : : */
1306 tgl@sss.pgh.pa.us 1811 : 1827 : MemoryContext oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1812 : 1827 : List *qual = RelationGetPartitionQual(resultRelInfo->ri_RelationDesc);
1813 : :
2588 andres@anarazel.de 1814 : 1827 : resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1306 tgl@sss.pgh.pa.us 1815 : 1827 : MemoryContextSwitchTo(oldcxt);
1816 : : }
1817 : :
1818 : : /*
1819 : : * We will use the EState's per-tuple context for evaluating constraint
1820 : : * expressions (creating it if it's not already there).
1821 : : */
2685 rhaas@postgresql.org 1822 [ + + ]: 6748 : econtext = GetPerTupleExprContext(estate);
1823 : :
1824 : : /* Arrange for econtext's scan tuple to be the tuple under test */
1825 : 6748 : econtext->ecxt_scantuple = slot;
1826 : :
1827 : : /*
1828 : : * As in case of the cataloged constraints, we treat a NULL result as
1829 : : * success here, not a failure.
1830 : : */
2134 alvherre@alvh.no-ip. 1831 : 6748 : success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
1832 : :
1833 : : /* if asked to emit error, don't actually return on failure */
1834 [ + + + + ]: 6748 : if (!success && emitError)
1835 : 101 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1836 : :
1837 : 6647 : return success;
1838 : : }
1839 : :
/*
 * ExecPartitionCheckEmitError - Form and emit an error message after a failed
 * partition constraint check.
 *
 * Does not return; always raises ERRCODE_CHECK_VIOLATION via ereport(ERROR).
 */
void
ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
							TupleTableSlot *slot,
							EState *estate)
{
	Oid			root_relid;
	TupleDesc	tupdesc;
	char	   *val_desc;
	Bitmapset  *modifiedCols;

	/*
	 * If the tuple has been routed, it's been converted to the partition's
	 * rowtype, which might differ from the root table's.  We must convert it
	 * back to the root table's rowtype so that val_desc in the error message
	 * matches the input tuple.
	 */
	if (resultRelInfo->ri_RootResultRelInfo)
	{
		ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
		TupleDesc	old_tupdesc;
		AttrMap    *map;

		root_relid = RelationGetRelid(rootrel->ri_RelationDesc);
		tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);

		old_tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
		/* a reverse map (NULL if the descriptors match column-for-column) */
		map = build_attrmap_by_name_if_req(old_tupdesc, tupdesc, false);

		/*
		 * Partition-specific slot's tupdesc can't be changed, so allocate a
		 * new one.
		 */
		if (map != NULL)
			slot = execute_attr_map_slot(map, slot,
										 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
		modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
								 ExecGetUpdatedCols(rootrel, estate));
	}
	else
	{
		/* No routing happened; report against the relation itself. */
		root_relid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
		tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
		modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
								 ExecGetUpdatedCols(resultRelInfo, estate));
	}

	/* May yield NULL if the user lacks privileges to see the row values. */
	val_desc = ExecBuildSlotValueDescription(root_relid,
											 slot,
											 tupdesc,
											 modifiedCols,
											 64);
	ereport(ERROR,
			(errcode(ERRCODE_CHECK_VIOLATION),
			 errmsg("new row for relation \"%s\" violates partition constraint",
					RelationGetRelationName(resultRelInfo->ri_RelationDesc)),
			 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
			 errtable(resultRelInfo->ri_RelationDesc)));
}
1903 : :
/*
 * ExecConstraints - check constraints of the tuple in 'slot'
 *
 * This checks the traditional NOT NULL and check constraints.
 *
 * The partition constraint is *NOT* checked.
 *
 * Note: 'slot' contains the tuple to check the constraints of, which may
 * have been converted from the original input tuple after tuple routing.
 * 'resultRelInfo' is the final result relation, after tuple routing.
 *
 * On any violation this raises ereport(ERROR) and does not return.
 */
void
ExecConstraints(ResultRelInfo *resultRelInfo,
				TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	TupleConstr *constr = tupdesc->constr;
	Bitmapset  *modifiedCols;

	Assert(constr);				/* we should not be called otherwise */

	/* First pass: NOT NULL column constraints */
	if (constr->has_not_null)
	{
		int			natts = tupdesc->natts;
		int			attrChk;

		for (attrChk = 1; attrChk <= natts; attrChk++)
		{
			Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1);

			if (att->attnotnull && slot_attisnull(slot, attrChk))
			{
				char	   *val_desc;
				Relation	orig_rel = rel;
				TupleDesc	orig_tupdesc = RelationGetDescr(rel);

				/*
				 * If the tuple has been routed, it's been converted to the
				 * partition's rowtype, which might differ from the root
				 * table's.  We must convert it back to the root table's
				 * rowtype so that val_desc shown error message matches the
				 * input tuple.
				 */
				if (resultRelInfo->ri_RootResultRelInfo)
				{
					ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
					AttrMap    *map;

					tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
					/* a reverse map */
					map = build_attrmap_by_name_if_req(orig_tupdesc,
													   tupdesc,
													   false);

					/*
					 * Partition-specific slot's tupdesc can't be changed, so
					 * allocate a new one.
					 */
					if (map != NULL)
						slot = execute_attr_map_slot(map, slot,
													 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
					modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
											 ExecGetUpdatedCols(rootrel, estate));
					rel = rootrel->ri_RelationDesc;
				}
				else
					modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
											 ExecGetUpdatedCols(resultRelInfo, estate));
				val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
														 slot,
														 tupdesc,
														 modifiedCols,
														 64);

				/*
				 * Note the error cites orig_rel (the partition actually
				 * stored into), while the row image is shown in root rowtype.
				 */
				ereport(ERROR,
						(errcode(ERRCODE_NOT_NULL_VIOLATION),
						 errmsg("null value in column \"%s\" of relation \"%s\" violates not-null constraint",
								NameStr(att->attname),
								RelationGetRelationName(orig_rel)),
						 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
						 errtablecol(orig_rel, attrChk)));
			}
		}
	}

	/* Second pass: CHECK constraints, if the relation has any */
	if (rel->rd_rel->relchecks > 0)
	{
		const char *failed;

		if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
		{
			char	   *val_desc;
			Relation	orig_rel = rel;

			/* See the comment above. */
			if (resultRelInfo->ri_RootResultRelInfo)
			{
				ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
				TupleDesc	old_tupdesc = RelationGetDescr(rel);
				AttrMap    *map;

				tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
				/* a reverse map */
				map = build_attrmap_by_name_if_req(old_tupdesc,
												   tupdesc,
												   false);

				/*
				 * Partition-specific slot's tupdesc can't be changed, so
				 * allocate a new one.
				 */
				if (map != NULL)
					slot = execute_attr_map_slot(map, slot,
												 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
				modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
										 ExecGetUpdatedCols(rootrel, estate));
				rel = rootrel->ri_RelationDesc;
			}
			else
				modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
										 ExecGetUpdatedCols(resultRelInfo, estate));
			val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
													 slot,
													 tupdesc,
													 modifiedCols,
													 64);
			ereport(ERROR,
					(errcode(ERRCODE_CHECK_VIOLATION),
					 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
							RelationGetRelationName(orig_rel), failed),
					 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
					 errtableconstraint(orig_rel, failed)));
		}
	}
}
2040 : :
/*
 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
 * of the specified kind.
 *
 * Note that this needs to be called multiple times to ensure that all kinds of
 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
 * CHECK OPTION set and from row-level security policies).  See ExecInsert()
 * and ExecUpdate().
 *
 * On the first failing WCO of the requested kind, raises ereport(ERROR)
 * with a message appropriate to the WCO kind; otherwise returns normally.
 */
void
ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
					 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	ExprContext *econtext;
	ListCell   *l1,
			   *l2;

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* Check each of the constraints (WCOs and their compiled exprs pair up) */
	forboth(l1, resultRelInfo->ri_WithCheckOptions,
			l2, resultRelInfo->ri_WithCheckOptionExprs)
	{
		WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
		ExprState  *wcoExpr = (ExprState *) lfirst(l2);

		/*
		 * Skip any WCOs which are not the kind we are looking for at this
		 * time.
		 */
		if (wco->kind != kind)
			continue;

		/*
		 * WITH CHECK OPTION checks are intended to ensure that the new tuple
		 * is visible (in the case of a view) or that it passes the
		 * 'with-check' policy (in the case of row security). If the qual
		 * evaluates to NULL or FALSE, then the new tuple won't be included in
		 * the view or doesn't pass the 'with-check' policy for the table.
		 */
		if (!ExecQual(wcoExpr, econtext))
		{
			char	   *val_desc;
			Bitmapset  *modifiedCols;

			switch (wco->kind)
			{
					/*
					 * For WITH CHECK OPTIONs coming from views, we might be
					 * able to provide the details on the row, depending on
					 * the permissions on the relation (that is, if the user
					 * could view it directly anyway).  For RLS violations, we
					 * don't include the data since we don't know if the user
					 * should be able to view the tuple as that depends on the
					 * USING policy.
					 */
				case WCO_VIEW_CHECK:
					/* See the comment in ExecConstraints(). */
					if (resultRelInfo->ri_RootResultRelInfo)
					{
						ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
						TupleDesc	old_tupdesc = RelationGetDescr(rel);
						AttrMap    *map;

						tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
						/* a reverse map */
						map = build_attrmap_by_name_if_req(old_tupdesc,
														   tupdesc,
														   false);

						/*
						 * Partition-specific slot's tupdesc can't be changed,
						 * so allocate a new one.
						 */
						if (map != NULL)
							slot = execute_attr_map_slot(map, slot,
														 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));

						modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
												 ExecGetUpdatedCols(rootrel, estate));
						rel = rootrel->ri_RelationDesc;
					}
					else
						modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
												 ExecGetUpdatedCols(resultRelInfo, estate));
					val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
															 slot,
															 tupdesc,
															 modifiedCols,
															 64);

					ereport(ERROR,
							(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
							 errmsg("new row violates check option for view \"%s\"",
									wco->relname),
							 val_desc ? errdetail("Failing row contains %s.",
												  val_desc) : 0));
					break;
				case WCO_RLS_INSERT_CHECK:
				case WCO_RLS_UPDATE_CHECK:
					/* Named policies get cited in the message when known. */
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_MERGE_UPDATE_CHECK:
				case WCO_RLS_MERGE_DELETE_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("target row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("target row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_CONFLICT_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				default:
					elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
					break;
			}
		}
	}
}
2193 : :
/*
 * ExecBuildSlotValueDescription -- construct a string representing a tuple
 *
 * This is intentionally very similar to BuildIndexValueDescription, but
 * unlike that function, we truncate long field values (to at most maxfieldlen
 * bytes).  That seems necessary here since heap field values could be very
 * long, whereas index entries typically aren't so wide.
 *
 * Also, unlike the case with index entries, we need to be prepared to ignore
 * dropped columns.  We used to use the slot's tuple descriptor to decode the
 * data, but the slot's descriptor doesn't identify dropped columns, so we
 * now need to be passed the relation's descriptor.
 *
 * Note that, like BuildIndexValueDescription, if the user does not have
 * permission to view any of the columns involved, a NULL is returned.  Unlike
 * BuildIndexValueDescription, if the user has access to view a subset of the
 * column involved, that subset will be returned with a key identifying which
 * columns they are.
 *
 * The result is palloc'd in the current memory context.
 */
static char *
ExecBuildSlotValueDescription(Oid reloid,
							  TupleTableSlot *slot,
							  TupleDesc tupdesc,
							  Bitmapset *modifiedCols,
							  int maxfieldlen)
{
	StringInfoData buf;
	StringInfoData collist;
	bool		write_comma = false;
	bool		write_comma_collist = false;
	int			i;
	AclResult	aclresult;
	bool		table_perm = false;
	bool		any_perm = false;

	/*
	 * Check if RLS is enabled and should be active for the relation; if so,
	 * then don't return anything.  Otherwise, go through normal permission
	 * checks.
	 */
	if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
		return NULL;

	initStringInfo(&buf);

	appendStringInfoChar(&buf, '(');

	/*
	 * Check if the user has permissions to see the row.  Table-level SELECT
	 * allows access to all columns.  If the user does not have table-level
	 * SELECT then we check each column and include those the user has SELECT
	 * rights on.  Additionally, we always include columns the user provided
	 * data for.
	 */
	aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
	if (aclresult != ACLCHECK_OK)
	{
		/* Set up the buffer for the column list */
		initStringInfo(&collist);
		appendStringInfoChar(&collist, '(');
	}
	else
		table_perm = any_perm = true;

	/* Make sure the tuple is fully deconstructed */
	slot_getallattrs(slot);

	for (i = 0; i < tupdesc->natts; i++)
	{
		bool		column_perm = false;
		char	   *val;
		int			vallen;
		Form_pg_attribute att = TupleDescAttr(tupdesc, i);

		/* ignore dropped columns */
		if (att->attisdropped)
			continue;

		if (!table_perm)
		{
			/*
			 * No table-level SELECT, so need to make sure they either have
			 * SELECT rights on the column or that they have provided the data
			 * for the column.  If not, omit this column from the error
			 * message.
			 */
			aclresult = pg_attribute_aclcheck(reloid, att->attnum,
											  GetUserId(), ACL_SELECT);
			if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
							  modifiedCols) || aclresult == ACLCHECK_OK)
			{
				column_perm = any_perm = true;

				if (write_comma_collist)
					appendStringInfoString(&collist, ", ");
				else
					write_comma_collist = true;

				appendStringInfoString(&collist, NameStr(att->attname));
			}
		}

		if (table_perm || column_perm)
		{
			if (slot->tts_isnull[i])
				val = "null";
			else
			{
				Oid			foutoid;
				bool		typisvarlena;

				/* Render the datum with the type's output function */
				getTypeOutputInfo(att->atttypid,
								  &foutoid, &typisvarlena);
				val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
			}

			if (write_comma)
				appendStringInfoString(&buf, ", ");
			else
				write_comma = true;

			/* truncate if needed (clip at a multibyte character boundary) */
			vallen = strlen(val);
			if (vallen <= maxfieldlen)
				appendBinaryStringInfo(&buf, val, vallen);
			else
			{
				vallen = pg_mbcliplen(val, vallen, maxfieldlen);
				appendBinaryStringInfo(&buf, val, vallen);
				appendStringInfoString(&buf, "...");
			}
		}
	}

	/* If we end up with zero columns being returned, then return NULL. */
	if (!any_perm)
		return NULL;

	appendStringInfoChar(&buf, ')');

	if (!table_perm)
	{
		/* Prefix the value list with the subset of column names shown */
		appendStringInfoString(&collist, ") = ");
		appendBinaryStringInfo(&collist, buf.data, buf.len);

		return collist.data;
	}

	return buf.data;
}
2344 : :
2345 : :
2346 : : /*
2347 : : * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2348 : : * given ResultRelInfo
2349 : : */
2350 : : LockTupleMode
3264 andres@anarazel.de 2351 : 3921 : ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2352 : : {
2353 : : Bitmapset *keyCols;
2354 : : Bitmapset *updatedCols;
2355 : :
2356 : : /*
2357 : : * Compute lock mode to use. If columns that are part of the key have not
2358 : : * been modified, then we can use a weaker lock, allowing for better
2359 : : * concurrency.
2360 : : */
1161 heikki.linnakangas@i 2361 : 3921 : updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
3264 andres@anarazel.de 2362 : 3921 : keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2363 : : INDEX_ATTR_BITMAP_KEY);
2364 : :
2365 [ + + ]: 3921 : if (bms_overlap(keyCols, updatedCols))
2366 : 124 : return LockTupleExclusive;
2367 : :
2368 : 3797 : return LockTupleNoKeyExclusive;
2369 : : }
2370 : :
2371 : : /*
2372 : : * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2373 : : *
2374 : : * If no such struct, either return NULL or throw error depending on missing_ok
2375 : : */
2376 : : ExecRowMark *
3260 tgl@sss.pgh.pa.us 2377 : 5386 : ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2378 : : {
2015 2379 [ + - + - ]: 5386 : if (rti > 0 && rti <= estate->es_range_table_size &&
2380 [ + - ]: 5386 : estate->es_rowmarks != NULL)
2381 : : {
2382 : 5386 : ExecRowMark *erm = estate->es_rowmarks[rti - 1];
2383 : :
2384 [ + - ]: 5386 : if (erm)
4841 2385 : 5386 : return erm;
2386 : : }
3260 tgl@sss.pgh.pa.us 2387 [ # # ]:UBC 0 : if (!missing_ok)
2388 [ # # ]: 0 : elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2389 : 0 : return NULL;
2390 : : }
2391 : :
2392 : : /*
2393 : : * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2394 : : *
2395 : : * Inputs are the underlying ExecRowMark struct and the targetlist of the
2396 : : * input plan node (not planstate node!). We need the latter to find out
2397 : : * the column numbers of the resjunk columns.
2398 : : */
2399 : : ExecAuxRowMark *
4841 tgl@sss.pgh.pa.us 2400 :CBC 5386 : ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2401 : : {
2402 : 5386 : ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2403 : : char resname[32];
2404 : :
2405 : 5386 : aerm->rowmark = erm;
2406 : :
2407 : : /* Look up the resjunk columns associated with this rowmark */
3311 2408 [ + + ]: 5386 : if (erm->markType != ROW_MARK_COPY)
2409 : : {
2410 : : /* need ctid for all methods other than COPY */
4813 2411 : 5062 : snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
4841 2412 : 5062 : aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2413 : : resname);
4813 2414 [ - + ]: 5062 : if (!AttributeNumberIsValid(aerm->ctidAttNo))
4813 tgl@sss.pgh.pa.us 2415 [ # # ]:UBC 0 : elog(ERROR, "could not find junk %s column", resname);
2416 : : }
2417 : : else
2418 : : {
2419 : : /* need wholerow if COPY */
4813 tgl@sss.pgh.pa.us 2420 :CBC 324 : snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
4841 2421 : 324 : aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2422 : : resname);
4813 2423 [ - + ]: 324 : if (!AttributeNumberIsValid(aerm->wholeAttNo))
4813 tgl@sss.pgh.pa.us 2424 [ # # ]:UBC 0 : elog(ERROR, "could not find junk %s column", resname);
2425 : : }
2426 : :
2427 : : /* if child rel, need tableoid */
3311 tgl@sss.pgh.pa.us 2428 [ + + ]:CBC 5386 : if (erm->rti != erm->prti)
2429 : : {
2430 : 914 : snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2431 : 914 : aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2432 : : resname);
2433 [ - + ]: 914 : if (!AttributeNumberIsValid(aerm->toidAttNo))
3311 tgl@sss.pgh.pa.us 2434 [ # # ]:UBC 0 : elog(ERROR, "could not find junk %s column", resname);
2435 : : }
2436 : :
4841 tgl@sss.pgh.pa.us 2437 :CBC 5386 : return aerm;
2438 : : }
2439 : :
2440 : :
2441 : : /*
2442 : : * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2443 : : * process the updated version under READ COMMITTED rules.
2444 : : *
2445 : : * See backend/executor/README for some info about how this works.
2446 : : */
2447 : :
2448 : :
2449 : : /*
2450 : : * Check the updated version of a tuple to see if we want to process it under
2451 : : * READ COMMITTED rules.
2452 : : *
2453 : : * epqstate - state for EvalPlanQual rechecking
2454 : : * relation - table containing tuple
2455 : : * rti - rangetable index of table containing tuple
2456 : : * inputslot - tuple for processing - this can be the slot from
2457 : : * EvalPlanQualSlot() for this rel, for increased efficiency.
2458 : : *
2459 : : * This tests whether the tuple in inputslot still matches the relevant
2460 : : * quals. For that result to be useful, typically the input tuple has to be
2461 : : * last row version (otherwise the result isn't particularly useful) and
2462 : : * locked (otherwise the result might be out of date). That's typically
2463 : : * achieved by using table_tuple_lock() with the
2464 : : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
2465 : : *
2466 : : * Returns a slot containing the new candidate update/delete tuple, or
2467 : : * NULL if we determine we shouldn't process the row.
2468 : : */
2469 : : TupleTableSlot *
1683 andres@anarazel.de 2470 : 129 : EvalPlanQual(EPQState *epqstate, Relation relation,
2471 : : Index rti, TupleTableSlot *inputslot)
2472 : : {
2473 : : TupleTableSlot *slot;
2474 : : TupleTableSlot *testslot;
2475 : :
5284 tgl@sss.pgh.pa.us 2476 [ - + ]: 129 : Assert(rti > 0);
2477 : :
2478 : : /*
2479 : : * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2480 : : */
1683 andres@anarazel.de 2481 : 129 : EvalPlanQualBegin(epqstate);
2482 : :
2483 : : /*
2484 : : * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
2485 : : * an unnecessary copy.
2486 : : */
1871 2487 : 129 : testslot = EvalPlanQualSlot(epqstate, relation, rti);
1849 2488 [ + + ]: 129 : if (testslot != inputslot)
2489 : 6 : ExecCopySlot(testslot, inputslot);
2490 : :
2491 : : /*
2492 : : * Mark that an EPQ tuple is available for this relation. (If there is
2493 : : * more than one result relation, the others remain marked as having no
2494 : : * tuple available.)
2495 : : */
331 tgl@sss.pgh.pa.us 2496 : 129 : epqstate->relsubs_done[rti - 1] = false;
2497 : 129 : epqstate->relsubs_blocked[rti - 1] = false;
2498 : :
2499 : : /*
2500 : : * Run the EPQ query. We assume it will return at most one tuple.
2501 : : */
5284 2502 : 129 : slot = EvalPlanQualNext(epqstate);
2503 : :
2504 : : /*
2505 : : * If we got a tuple, force the slot to materialize the tuple so that it
2506 : : * is not dependent on any local state in the EPQ query (in particular,
2507 : : * it's highly likely that the slot contains references to any pass-by-ref
2508 : : * datums that may be present in copyTuple). As with the next step, this
2509 : : * is to guard against early re-use of the EPQ query.
2510 : : */
5238 2511 [ + + + + ]: 129 : if (!TupIsNull(slot))
1977 andres@anarazel.de 2512 : 95 : ExecMaterializeSlot(slot);
2513 : :
2514 : : /*
2515 : : * Clear out the test tuple, and mark that no tuple is available here.
2516 : : * This is needed in case the EPQ state is re-used to test a tuple for a
2517 : : * different target relation.
2518 : : */
1871 2519 : 129 : ExecClearTuple(testslot);
331 tgl@sss.pgh.pa.us 2520 : 129 : epqstate->relsubs_blocked[rti - 1] = true;
2521 : :
5298 2522 : 129 : return slot;
2523 : : }
2524 : :
2525 : : /*
2526 : : * EvalPlanQualInit -- initialize during creation of a plan state node
2527 : : * that might need to invoke EPQ processing.
2528 : : *
2529 : : * If the caller intends to use EvalPlanQual(), resultRelations should be
2530 : : * a list of RT indexes of potential target relations for EvalPlanQual(),
2531 : : * and we will arrange that the other listed relations don't return any
2532 : : * tuple during an EvalPlanQual() call. Otherwise resultRelations
2533 : : * should be NIL.
2534 : : *
2535 : : * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2536 : : * with EvalPlanQualSetPlan.
2537 : : */
2538 : : void
1683 andres@anarazel.de 2539 : 132907 : EvalPlanQualInit(EPQState *epqstate, EState *parentestate,
2540 : : Plan *subplan, List *auxrowmarks,
2541 : : int epqParam, List *resultRelations)
2542 : : {
2543 : 132907 : Index rtsize = parentestate->es_range_table_size;
2544 : :
2545 : : /* initialize data not changing over EPQState's lifetime */
2546 : 132907 : epqstate->parentestate = parentestate;
2547 : 132907 : epqstate->epqParam = epqParam;
331 tgl@sss.pgh.pa.us 2548 : 132907 : epqstate->resultRelations = resultRelations;
2549 : :
2550 : : /*
2551 : : * Allocate space to reference a slot for each potential rti - do so now
2552 : : * rather than in EvalPlanQualBegin(), as done for other dynamically
2553 : : * allocated resources, so EvalPlanQualSlot() can be used to hold tuples
2554 : : * that *may* need EPQ later, without forcing the overhead of
2555 : : * EvalPlanQualBegin().
2556 : : */
1683 andres@anarazel.de 2557 : 132907 : epqstate->tuple_table = NIL;
2558 : 132907 : epqstate->relsubs_slot = (TupleTableSlot **)
2559 : 132907 : palloc0(rtsize * sizeof(TupleTableSlot *));
2560 : :
2561 : : /* ... and remember data that EvalPlanQualBegin will need */
5284 tgl@sss.pgh.pa.us 2562 : 132907 : epqstate->plan = subplan;
4841 2563 : 132907 : epqstate->arowMarks = auxrowmarks;
2564 : :
2565 : : /* ... and mark the EPQ state inactive */
1683 andres@anarazel.de 2566 : 132907 : epqstate->origslot = NULL;
2567 : 132907 : epqstate->recheckestate = NULL;
2568 : 132907 : epqstate->recheckplanstate = NULL;
2569 : 132907 : epqstate->relsubs_rowmark = NULL;
2570 : 132907 : epqstate->relsubs_done = NULL;
331 tgl@sss.pgh.pa.us 2571 : 132907 : epqstate->relsubs_blocked = NULL;
5284 2572 : 132907 : }
2573 : :
2574 : : /*
2575 : : * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2576 : : *
2577 : : * We used to need this so that ModifyTable could deal with multiple subplans.
2578 : : * It could now be refactored out of existence.
2579 : : */
2580 : : void
4841 2581 : 56566 : EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2582 : : {
2583 : : /* If we have a live EPQ query, shut it down */
5284 2584 : 56566 : EvalPlanQualEnd(epqstate);
2585 : : /* And set/change the plan pointer */
2586 : 56566 : epqstate->plan = subplan;
2587 : : /* The rowmarks depend on the plan, too */
4841 2588 : 56566 : epqstate->arowMarks = auxrowmarks;
5284 2589 : 56566 : }
2590 : :
2591 : : /*
2592 : : * Return, and create if necessary, a slot for an EPQ test tuple.
2593 : : *
2594 : : * Note this only requires EvalPlanQualInit() to have been called,
2595 : : * EvalPlanQualBegin() is not necessary.
2596 : : */
2597 : : TupleTableSlot *
1871 andres@anarazel.de 2598 : 6552 : EvalPlanQualSlot(EPQState *epqstate,
2599 : : Relation relation, Index rti)
2600 : : {
2601 : : TupleTableSlot **slot;
2602 : :
1683 2603 [ - + ]: 6552 : Assert(relation);
2604 [ + - - + ]: 6552 : Assert(rti > 0 && rti <= epqstate->parentestate->es_range_table_size);
2605 : 6552 : slot = &epqstate->relsubs_slot[rti - 1];
2606 : :
1871 2607 [ + + ]: 6552 : if (*slot == NULL)
2608 : : {
2609 : : MemoryContext oldcontext;
2610 : :
1683 2611 : 2914 : oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt);
2612 : 2914 : *slot = table_slot_create(relation, &epqstate->tuple_table);
1871 2613 : 2914 : MemoryContextSwitchTo(oldcontext);
2614 : : }
2615 : :
2616 : 6552 : return *slot;
2617 : : }
2618 : :
2619 : : /*
2620 : : * Fetch the current row value for a non-locked relation, identified by rti,
2621 : : * that needs to be scanned by an EvalPlanQual operation. origslot must have
2622 : : * been set to contain the current result row (top-level row) that we need to
2623 : : * recheck. Returns true if a substitution tuple was found, false if not.
2624 : : */
2625 : : bool
1683 2626 : 17 : EvalPlanQualFetchRowMark(EPQState *epqstate, Index rti, TupleTableSlot *slot)
2627 : : {
2628 : 17 : ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1];
2629 : 17 : ExecRowMark *erm = earm->rowmark;
2630 : : Datum datum;
2631 : : bool isNull;
2632 : :
2633 [ - + ]: 17 : Assert(earm != NULL);
5284 tgl@sss.pgh.pa.us 2634 [ - + ]: 17 : Assert(epqstate->origslot != NULL);
2635 : :
1683 andres@anarazel.de 2636 [ - + ]: 17 : if (RowMarkRequiresRowShareLock(erm->markType))
1683 andres@anarazel.de 2637 [ # # ]:UBC 0 : elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2638 : :
2639 : : /* if child rel, must check whether it produced this row */
1683 andres@anarazel.de 2640 [ - + ]:CBC 17 : if (erm->rti != erm->prti)
2641 : : {
2642 : : Oid tableoid;
2643 : :
1683 andres@anarazel.de 2644 :UBC 0 : datum = ExecGetJunkAttribute(epqstate->origslot,
2645 : 0 : earm->toidAttNo,
2646 : : &isNull);
2647 : : /* non-locked rels could be on the inside of outer joins */
2648 [ # # ]: 0 : if (isNull)
2649 : 0 : return false;
2650 : :
2651 : 0 : tableoid = DatumGetObjectId(datum);
2652 : :
2653 [ # # ]: 0 : Assert(OidIsValid(erm->relid));
2654 [ # # ]: 0 : if (tableoid != erm->relid)
2655 : : {
2656 : : /* this child is inactive right now */
2657 : 0 : return false;
2658 : : }
2659 : : }
2660 : :
1683 andres@anarazel.de 2661 [ + + ]:CBC 17 : if (erm->markType == ROW_MARK_REFERENCE)
2662 : : {
2663 [ - + ]: 13 : Assert(erm->relation != NULL);
2664 : :
2665 : : /* fetch the tuple's ctid */
2666 : 13 : datum = ExecGetJunkAttribute(epqstate->origslot,
2667 : 13 : earm->ctidAttNo,
2668 : : &isNull);
2669 : : /* non-locked rels could be on the inside of outer joins */
2670 [ - + ]: 13 : if (isNull)
1683 andres@anarazel.de 2671 :UBC 0 : return false;
2672 : :
2673 : : /* fetch requests on foreign tables must be passed to their FDW */
1683 andres@anarazel.de 2674 [ - + ]:CBC 13 : if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2675 : : {
2676 : : FdwRoutine *fdwroutine;
1683 andres@anarazel.de 2677 :UBC 0 : bool updated = false;
2678 : :
2679 : 0 : fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2680 : : /* this should have been checked already, but let's be safe */
2681 [ # # ]: 0 : if (fdwroutine->RefetchForeignRow == NULL)
2682 [ # # ]: 0 : ereport(ERROR,
2683 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2684 : : errmsg("cannot lock rows in foreign table \"%s\"",
2685 : : RelationGetRelationName(erm->relation))));
2686 : :
2687 : 0 : fdwroutine->RefetchForeignRow(epqstate->recheckestate,
2688 : : erm,
2689 : : datum,
2690 : : slot,
2691 : : &updated);
2692 [ # # # # ]: 0 : if (TupIsNull(slot))
2693 [ # # ]: 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2694 : :
2695 : : /*
2696 : : * Ideally we'd insist on updated == false here, but that assumes
2697 : : * that FDWs can track that exactly, which they might not be able
2698 : : * to. So just ignore the flag.
2699 : : */
2700 : 0 : return true;
2701 : : }
2702 : : else
2703 : : {
2704 : : /* ordinary table, fetch the tuple */
1683 andres@anarazel.de 2705 [ - + ]:CBC 13 : if (!table_tuple_fetch_row_version(erm->relation,
2706 : 13 : (ItemPointer) DatumGetPointer(datum),
2707 : : SnapshotAny, slot))
1683 andres@anarazel.de 2708 [ # # ]:UBC 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
1683 andres@anarazel.de 2709 :CBC 13 : return true;
2710 : : }
2711 : : }
2712 : : else
2713 : : {
2714 [ - + ]: 4 : Assert(erm->markType == ROW_MARK_COPY);
2715 : :
2716 : : /* fetch the whole-row Var for the relation */
2717 : 4 : datum = ExecGetJunkAttribute(epqstate->origslot,
2718 : 4 : earm->wholeAttNo,
2719 : : &isNull);
2720 : : /* non-locked rels could be on the inside of outer joins */
2721 [ - + ]: 4 : if (isNull)
1683 andres@anarazel.de 2722 :UBC 0 : return false;
2723 : :
1683 andres@anarazel.de 2724 :CBC 4 : ExecStoreHeapTupleDatum(datum, slot);
2725 : 4 : return true;
2726 : : }
2727 : : }
2728 : :
2729 : : /*
2730 : : * Fetch the next row (if any) from EvalPlanQual testing
2731 : : *
2732 : : * (In practice, there should never be more than one row...)
2733 : : */
2734 : : TupleTableSlot *
5284 tgl@sss.pgh.pa.us 2735 : 156 : EvalPlanQualNext(EPQState *epqstate)
2736 : : {
2737 : : MemoryContext oldcontext;
2738 : : TupleTableSlot *slot;
2739 : :
1683 andres@anarazel.de 2740 : 156 : oldcontext = MemoryContextSwitchTo(epqstate->recheckestate->es_query_cxt);
2741 : 156 : slot = ExecProcNode(epqstate->recheckplanstate);
7788 tgl@sss.pgh.pa.us 2742 : 156 : MemoryContextSwitchTo(oldcontext);
2743 : :
5298 2744 : 156 : return slot;
2745 : : }
2746 : :
/*
 * Initialize or reset an EvalPlanQual state tree
 *
 * On first use, builds the child EState and plan tree via EvalPlanQualStart.
 * On subsequent calls, resets the existing tree: per-relation "done" flags
 * are restored from the "blocked" template, parent executor parameters are
 * recopied, and the plan is marked for rescan.
 */
void
EvalPlanQualBegin(EPQState *epqstate)
{
	EState	   *parentestate = epqstate->parentestate;
	EState	   *recheckestate = epqstate->recheckestate;

	if (recheckestate == NULL)
	{
		/* First time through, so create a child EState */
		EvalPlanQualStart(epqstate, epqstate->plan);
	}
	else
	{
		/*
		 * We already have a suitable child EPQ tree, so just reset it.
		 */
		Index		rtsize = parentestate->es_range_table_size;
		PlanState  *rcplanstate = epqstate->recheckplanstate;

		/*
		 * Reset the relsubs_done[] flags to equal relsubs_blocked[], so that
		 * the EPQ run will never attempt to fetch tuples from blocked target
		 * relations.
		 */
		memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
			   rtsize * sizeof(bool));

		/* Recopy current values of parent parameters */
		if (parentestate->es_plannedstmt->paramExecTypes != NIL)
		{
			int			i;

			/*
			 * Force evaluation of any InitPlan outputs that could be needed
			 * by the subplan, just in case they got reset since
			 * EvalPlanQualStart (see comments therein).
			 */
			ExecSetParamPlanMulti(rcplanstate->plan->extParam,
								  GetPerTupleExprContext(parentestate));

			i = list_length(parentestate->es_plannedstmt->paramExecTypes);

			while (--i >= 0)
			{
				/* copy value if any, but not execPlan link */
				recheckestate->es_param_exec_vals[i].value =
					parentestate->es_param_exec_vals[i].value;
				recheckestate->es_param_exec_vals[i].isnull =
					parentestate->es_param_exec_vals[i].isnull;
			}
		}

		/*
		 * Mark child plan tree as needing rescan at all scan nodes.  The
		 * first ExecProcNode will take care of actually doing the rescan.
		 */
		rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam,
											   epqstate->epqParam);
	}
}
2784 : : * by the subplan, just in case they got reset since
2785 : : * EvalPlanQualStart (see comments therein).
2786 : : */
1683 andres@anarazel.de 2787 : 72 : ExecSetParamPlanMulti(rcplanstate->plan->extParam,
2038 tgl@sss.pgh.pa.us 2788 [ + - ]: 72 : GetPerTupleExprContext(parentestate));
2789 : :
2344 rhaas@postgresql.org 2790 : 72 : i = list_length(parentestate->es_plannedstmt->paramExecTypes);
2791 : :
5284 tgl@sss.pgh.pa.us 2792 [ + + ]: 153 : while (--i >= 0)
2793 : : {
2794 : : /* copy value if any, but not execPlan link */
1683 andres@anarazel.de 2795 : 81 : recheckestate->es_param_exec_vals[i].value =
5284 tgl@sss.pgh.pa.us 2796 : 81 : parentestate->es_param_exec_vals[i].value;
1683 andres@anarazel.de 2797 : 81 : recheckestate->es_param_exec_vals[i].isnull =
5284 tgl@sss.pgh.pa.us 2798 : 81 : parentestate->es_param_exec_vals[i].isnull;
2799 : : }
2800 : : }
2801 : :
2802 : : /*
2803 : : * Mark child plan tree as needing rescan at all scan nodes. The
2804 : : * first ExecProcNode will take care of actually doing the rescan.
2805 : : */
1683 andres@anarazel.de 2806 : 72 : rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam,
2807 : : epqstate->epqParam);
2808 : : }
7788 tgl@sss.pgh.pa.us 2809 : 184 : }
2810 : :
2811 : : /*
2812 : : * Start execution of an EvalPlanQual plan tree.
2813 : : *
2814 : : * This is a cut-down version of ExecutorStart(): we copy some state from
2815 : : * the top-level estate rather than initializing it fresh.
2816 : : */
2817 : : static void
1683 andres@anarazel.de 2818 : 112 : EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
2819 : : {
2820 : 112 : EState *parentestate = epqstate->parentestate;
2821 : 112 : Index rtsize = parentestate->es_range_table_size;
2822 : : EState *rcestate;
2823 : : MemoryContext oldcontext;
2824 : : ListCell *l;
2825 : :
2826 : 112 : epqstate->recheckestate = rcestate = CreateExecutorState();
2827 : :
2828 : 112 : oldcontext = MemoryContextSwitchTo(rcestate->es_query_cxt);
2829 : :
2830 : : /* signal that this is an EState for executing EPQ */
2831 : 112 : rcestate->es_epq_active = epqstate;
2832 : :
2833 : : /*
2834 : : * Child EPQ EStates share the parent's copy of unchanging state such as
2835 : : * the snapshot, rangetable, and external Param info. They need their own
2836 : : * copies of local state, including a tuple table, es_param_exec_vals,
2837 : : * result-rel info, etc.
2838 : : */
2839 : 112 : rcestate->es_direction = ForwardScanDirection;
2840 : 112 : rcestate->es_snapshot = parentestate->es_snapshot;
2841 : 112 : rcestate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
2842 : 112 : rcestate->es_range_table = parentestate->es_range_table;
2843 : 112 : rcestate->es_range_table_size = parentestate->es_range_table_size;
2844 : 112 : rcestate->es_relations = parentestate->es_relations;
2845 : 112 : rcestate->es_rowmarks = parentestate->es_rowmarks;
405 tgl@sss.pgh.pa.us 2846 : 112 : rcestate->es_rteperminfos = parentestate->es_rteperminfos;
1683 andres@anarazel.de 2847 : 112 : rcestate->es_plannedstmt = parentestate->es_plannedstmt;
2848 : 112 : rcestate->es_junkFilter = parentestate->es_junkFilter;
2849 : 112 : rcestate->es_output_cid = parentestate->es_output_cid;
405 tgl@sss.pgh.pa.us 2850 : 112 : rcestate->es_queryEnv = parentestate->es_queryEnv;
2851 : :
2852 : : /*
2853 : : * ResultRelInfos needed by subplans are initialized from scratch when the
2854 : : * subplans themselves are initialized.
2855 : : */
1269 heikki.linnakangas@i 2856 : 112 : rcestate->es_result_relations = NULL;
2857 : : /* es_trig_target_relations must NOT be copied */
1683 andres@anarazel.de 2858 : 112 : rcestate->es_top_eflags = parentestate->es_top_eflags;
2859 : 112 : rcestate->es_instrument = parentestate->es_instrument;
2860 : : /* es_auxmodifytables must NOT be copied */
2861 : :
2862 : : /*
2863 : : * The external param list is simply shared from parent. The internal
2864 : : * param workspace has to be local state, but we copy the initial values
2865 : : * from the parent, so as to have access to any param values that were
2866 : : * already set from other parts of the parent's plan tree.
2867 : : */
2868 : 112 : rcestate->es_param_list_info = parentestate->es_param_list_info;
2344 rhaas@postgresql.org 2869 [ + - ]: 112 : if (parentestate->es_plannedstmt->paramExecTypes != NIL)
2870 : : {
2871 : : int i;
2872 : :
2873 : : /*
2874 : : * Force evaluation of any InitPlan outputs that could be needed by
2875 : : * the subplan. (With more complexity, maybe we could postpone this
2876 : : * till the subplan actually demands them, but it doesn't seem worth
2877 : : * the trouble; this is a corner case already, since usually the
2878 : : * InitPlans would have been evaluated before reaching EvalPlanQual.)
2879 : : *
2880 : : * This will not touch output params of InitPlans that occur somewhere
2881 : : * within the subplan tree, only those that are attached to the
2882 : : * ModifyTable node or above it and are referenced within the subplan.
2883 : : * That's OK though, because the planner would only attach such
2884 : : * InitPlans to a lower-level SubqueryScan node, and EPQ execution
2885 : : * will not descend into a SubqueryScan.
2886 : : *
2887 : : * The EState's per-output-tuple econtext is sufficiently short-lived
2888 : : * for this, since it should get reset before there is any chance of
2889 : : * doing EvalPlanQual again.
2890 : : */
2038 tgl@sss.pgh.pa.us 2891 : 112 : ExecSetParamPlanMulti(planTree->extParam,
2892 [ + + ]: 112 : GetPerTupleExprContext(parentestate));
2893 : :
2894 : : /* now make the internal param workspace ... */
2344 rhaas@postgresql.org 2895 : 112 : i = list_length(parentestate->es_plannedstmt->paramExecTypes);
1683 andres@anarazel.de 2896 : 112 : rcestate->es_param_exec_vals = (ParamExecData *)
5284 tgl@sss.pgh.pa.us 2897 : 112 : palloc0(i * sizeof(ParamExecData));
2898 : : /* ... and copy down all values, whether really needed or not */
2899 [ + + ]: 273 : while (--i >= 0)
2900 : : {
2901 : : /* copy value if any, but not execPlan link */
1683 andres@anarazel.de 2902 : 161 : rcestate->es_param_exec_vals[i].value =
5284 tgl@sss.pgh.pa.us 2903 : 161 : parentestate->es_param_exec_vals[i].value;
1683 andres@anarazel.de 2904 : 161 : rcestate->es_param_exec_vals[i].isnull =
5284 tgl@sss.pgh.pa.us 2905 : 161 : parentestate->es_param_exec_vals[i].isnull;
2906 : : }
2907 : : }
2908 : :
2909 : : /*
2910 : : * Initialize private state information for each SubPlan. We must do this
2911 : : * before running ExecInitNode on the main query tree, since
2912 : : * ExecInitSubPlan expects to be able to find these entries. Some of the
2913 : : * SubPlans might not be used in the part of the plan tree we intend to
2914 : : * run, but since it's not easy to tell which, we just initialize them
2915 : : * all.
2916 : : */
1683 andres@anarazel.de 2917 [ - + ]: 112 : Assert(rcestate->es_subplanstates == NIL);
5284 tgl@sss.pgh.pa.us 2918 [ + + + + : 140 : foreach(l, parentestate->es_plannedstmt->subplans)
+ + ]
2919 : : {
5995 bruce@momjian.us 2920 : 28 : Plan *subplan = (Plan *) lfirst(l);
2921 : : PlanState *subplanstate;
2922 : :
1683 andres@anarazel.de 2923 : 28 : subplanstate = ExecInitNode(subplan, rcestate, 0);
2924 : 28 : rcestate->es_subplanstates = lappend(rcestate->es_subplanstates,
2925 : : subplanstate);
2926 : : }
2927 : :
2928 : : /*
2929 : : * Build an RTI indexed array of rowmarks, so that
2930 : : * EvalPlanQualFetchRowMark() can efficiently access the to be fetched
2931 : : * rowmark.
2932 : : */
1538 tgl@sss.pgh.pa.us 2933 : 112 : epqstate->relsubs_rowmark = (ExecAuxRowMark **)
2934 : 112 : palloc0(rtsize * sizeof(ExecAuxRowMark *));
1683 andres@anarazel.de 2935 [ + + + + : 124 : foreach(l, epqstate->arowMarks)
+ + ]
2936 : : {
2937 : 12 : ExecAuxRowMark *earm = (ExecAuxRowMark *) lfirst(l);
2938 : :
2939 : 12 : epqstate->relsubs_rowmark[earm->rowmark->rti - 1] = earm;
2940 : : }
2941 : :
2942 : : /*
2943 : : * Initialize per-relation EPQ tuple states. Result relations, if any,
2944 : : * get marked as blocked; others as not-fetched.
2945 : : */
331 tgl@sss.pgh.pa.us 2946 : 112 : epqstate->relsubs_done = palloc_array(bool, rtsize);
2947 : 112 : epqstate->relsubs_blocked = palloc0_array(bool, rtsize);
2948 : :
2949 [ + + + + : 225 : foreach(l, epqstate->resultRelations)
+ + ]
2950 : : {
2951 : 113 : int rtindex = lfirst_int(l);
2952 : :
2953 [ + - - + ]: 113 : Assert(rtindex > 0 && rtindex <= rtsize);
2954 : 113 : epqstate->relsubs_blocked[rtindex - 1] = true;
2955 : : }
2956 : :
2957 : 112 : memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
2958 : : rtsize * sizeof(bool));
2959 : :
2960 : : /*
2961 : : * Initialize the private state information for all the nodes in the part
2962 : : * of the plan tree we need to run. This opens files, allocates storage
2963 : : * and leaves us ready to start processing tuples.
2964 : : */
1683 andres@anarazel.de 2965 : 112 : epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0);
2966 : :
7788 tgl@sss.pgh.pa.us 2967 : 112 : MemoryContextSwitchTo(oldcontext);
2968 : 112 : }
2969 : :
/*
 * EvalPlanQualEnd -- shut down at termination of parent plan state node,
 * or if we are done with the current EPQ child.
 *
 * This is a cut-down version of ExecutorEnd(); basically we want to do most
 * of the normal cleanup, but *not* close result relations (which we are
 * just sharing from the outer query).  We do, however, have to close any
 * result and trigger target relations that got opened, since those are not
 * shared.  (There probably shouldn't be any of the latter, but just in
 * case...)
 */
void
EvalPlanQualEnd(EPQState *epqstate)
{
	EState	   *estate = epqstate->recheckestate;
	Index		rtsize;
	MemoryContext oldcontext;
	ListCell   *l;

	rtsize = epqstate->parentestate->es_range_table_size;

	/*
	 * We may have a tuple table, even if EPQ wasn't started, because we allow
	 * use of EvalPlanQualSlot() without calling EvalPlanQualBegin().
	 */
	if (epqstate->tuple_table != NIL)
	{
		/* Clear the per-RTI slot pointers before dropping the slots */
		memset(epqstate->relsubs_slot, 0,
			   rtsize * sizeof(TupleTableSlot *));
		ExecResetTupleTable(epqstate->tuple_table, true);
		epqstate->tuple_table = NIL;
	}

	/* EPQ wasn't started, nothing further to do */
	if (estate == NULL)
		return;

	/* Do node shutdown in the child estate's query context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	ExecEndNode(epqstate->recheckplanstate);

	/* Shut down the subplan nodes initialized in EvalPlanQualStart */
	foreach(l, estate->es_subplanstates)
	{
		PlanState  *subplanstate = (PlanState *) lfirst(l);

		ExecEndNode(subplanstate);
	}

	/* throw away the per-estate tuple table, some node may have used it */
	ExecResetTupleTable(estate->es_tupleTable, false);

	/* Close any result and trigger target relations attached to this EState */
	ExecCloseResultRelations(estate);

	MemoryContextSwitchTo(oldcontext);

	FreeExecutorState(estate);

	/* Mark EPQState idle */
	epqstate->origslot = NULL;
	epqstate->recheckestate = NULL;
	epqstate->recheckplanstate = NULL;
	epqstate->relsubs_rowmark = NULL;
	epqstate->relsubs_done = NULL;
	epqstate->relsubs_blocked = NULL;
}
|