/*-------------------------------------------------------------------------
 *
 * portalmem.c
 *    backend portal memory management
 *
 * Portals are objects representing the execution state of a query.
 * This module provides memory management services for portals, but it
 * doesn't actually run the executor for them.
 *
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/utils/mmgr/portalmem.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/xact.h"
#include "commands/portalcmds.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "storage/ipc.h"
#include "utils/builtins.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/timestamp.h"

/*
 * Estimate of the maximum number of open portals a user would have,
 * used in initially sizing the PortalHashTable in EnablePortalManager().
 * Since the hash table can expand, there's no need to make this overly
 * generous, and keeping it small avoids unnecessary overhead in the
 * hash_seq_search() calls executed during transaction end.
 */
#define PORTALS_PER_USER    16


/* ----------------
 *      Global state
 * ----------------
 */

#define MAX_PORTALNAME_LEN      NAMEDATALEN

typedef struct portalhashent
{
    char        portalname[MAX_PORTALNAME_LEN];
    Portal      portal;
} PortalHashEnt;

static HTAB *PortalHashTable = NULL;

#define PortalHashTableLookup(NAME, PORTAL) \
do { \
    PortalHashEnt *hentry; \
    \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           (NAME), HASH_FIND, NULL); \
    if (hentry) \
        PORTAL = hentry->portal; \
    else \
        PORTAL = NULL; \
} while(0)

#define PortalHashTableInsert(PORTAL, NAME) \
do { \
    PortalHashEnt *hentry; bool found; \
    \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           (NAME), HASH_ENTER, &found); \
    if (found) \
        elog(ERROR, "duplicate portal name"); \
    hentry->portal = PORTAL; \
    /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
    PORTAL->name = hentry->portalname; \
} while(0)

#define PortalHashTableDelete(PORTAL) \
do { \
    PortalHashEnt *hentry; \
    \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           PORTAL->name, HASH_REMOVE, NULL); \
    if (hentry == NULL) \
        elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)

static MemoryContext TopPortalContext = NULL;
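
/*
 * Context hierarchy used by this module: TopPortalContext is a child of
 * TopMemoryContext and holds each Portal struct itself.  Every portal then
 * gets its own child context, portal->portalContext, for per-portal working
 * data, and holdable cursors additionally get a holdContext (created
 * directly under TopPortalContext, not under portalContext) for their
 * materialized result set; see PortalCreateHoldStore() and PortalDrop().
 */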


/* ----------------------------------------------------------------
 *                 public portal interface functions
 * ----------------------------------------------------------------
 */

/*
 * EnablePortalManager
 *      Enables the portal management module at backend startup.
 */
void
EnablePortalManager(void)
{
    HASHCTL     ctl;

    Assert(TopPortalContext == NULL);

    TopPortalContext = AllocSetContextCreate(TopMemoryContext,
                                             "TopPortalContext",
                                             ALLOCSET_DEFAULT_SIZES);

    ctl.keysize = MAX_PORTALNAME_LEN;
    ctl.entrysize = sizeof(PortalHashEnt);

    /*
     * use PORTALS_PER_USER as a guess of how many hash table entries to
     * create, initially
     */
    PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
                                  &ctl, HASH_ELEM | HASH_STRINGS);
}

/*
 * GetPortalByName
 *      Returns a portal given a portal name, or NULL if name not found.
 */
Portal
GetPortalByName(const char *name)
{
    Portal      portal;

    if (PointerIsValid(name))
        PortalHashTableLookup(name, portal);
    else
        portal = NULL;

    return portal;
}

/*
 * PortalGetPrimaryStmt
 *      Get the "primary" stmt within a portal, ie, the one marked canSetTag.
 *
 * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
 * portal are marked canSetTag, returns the first one.  Neither of these
 * cases should occur in present usages of this function.
 */
PlannedStmt *
PortalGetPrimaryStmt(Portal portal)
{
    ListCell   *lc;

    foreach(lc, portal->stmts)
    {
        PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);

        if (stmt->canSetTag)
            return stmt;
    }
    return NULL;
}

/*
 * CreatePortal
 *      Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
    Portal      portal;

    Assert(PointerIsValid(name));

    portal = GetPortalByName(name);
    if (PortalIsValid(portal))
    {
        if (!allowDup)
            ereport(ERROR,
                    (errcode(ERRCODE_DUPLICATE_CURSOR),
                     errmsg("cursor \"%s\" already exists", name)));
        if (!dupSilent)
            ereport(WARNING,
                    (errcode(ERRCODE_DUPLICATE_CURSOR),
                     errmsg("closing existing cursor \"%s\"",
                            name)));
        PortalDrop(portal, false);
    }

    /* make new portal structure */
    portal = (Portal) MemoryContextAllocZero(TopPortalContext, sizeof *portal);

    /* initialize portal context; typically it won't store much */
    portal->portalContext = AllocSetContextCreate(TopPortalContext,
                                                  "PortalContext",
                                                  ALLOCSET_SMALL_SIZES);

    /* create a resource owner for the portal */
    portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
                                           "Portal");

    /* initialize portal fields that don't start off zero */
    portal->status = PORTAL_NEW;
    portal->cleanup = PortalCleanup;
    portal->createSubid = GetCurrentSubTransactionId();
    portal->activeSubid = portal->createSubid;
    portal->createLevel = GetCurrentTransactionNestLevel();
    portal->strategy = PORTAL_MULTI_QUERY;
    portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
    portal->atStart = true;
    portal->atEnd = true;       /* disallow fetches until query is set */
    portal->visible = true;
    portal->creation_time = GetCurrentStatementStartTimestamp();

    /* put portal in table (sets portal->name) */
    PortalHashTableInsert(portal, name);

    /* for named portals reuse portal->name copy */
    MemoryContextSetIdentifier(portal->portalContext, portal->name[0] ? portal->name : "<unnamed>");

    return portal;
}

/*
 * CreateNewPortal
 *      Create a new portal, assigning it a random nonconflicting name.
 */
Portal
CreateNewPortal(void)
{
    static unsigned int unnamed_portal_count = 0;

    char        portalname[MAX_PORTALNAME_LEN];

    /* Select a nonconflicting name */
    for (;;)
    {
        unnamed_portal_count++;
        sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
        if (GetPortalByName(portalname) == NULL)
            break;
    }

    return CreatePortal(portalname, false, false);
}
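
/*
 * Typical portal life cycle (the authoritative call sequences live in the
 * callers, e.g. postgres.c and pquery.c): a portal is created with
 * CreatePortal() or CreateNewPortal(), given its query with
 * PortalDefineQuery(), readied and executed via PortalStart() and
 * PortalRun() in pquery.c, and finally destroyed with PortalDrop(), either
 * explicitly or by the transaction-end cleanup functions below.
 */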

/*
 * PortalDefineQuery
 *      A simple subroutine to establish a portal's query.
 *
 * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
 * allowed anymore to pass NULL.  (If you really don't have source text,
 * you can pass a constant string, perhaps "(query not available)".)
 *
 * commandTag shall be NULL if and only if the original query string
 * (before rewriting) was an empty string.  Also, the passed commandTag must
 * be a pointer to a constant string, since it is not copied.
 *
 * If cplan is provided, then it is a cached plan containing the stmts, and
 * the caller must have done GetCachedPlan(), causing a refcount increment.
 * The refcount will be released when the portal is destroyed.
 *
 * If cplan is NULL, then it is the caller's responsibility to ensure that
 * the passed plan trees have adequate lifetime.  Typically this is done by
 * copying them into the portal's context.
 *
 * The caller is also responsible for ensuring that the passed prepStmtName
 * (if not NULL) and sourceText have adequate lifetime.
 *
 * NB: this function mustn't do much beyond storing the passed values; in
 * particular don't do anything that risks elog(ERROR).  If that were to
 * happen here before storing the cplan reference, we'd leak the plancache
 * refcount that the caller is trying to hand off to us.
 */
void
PortalDefineQuery(Portal portal,
                  const char *prepStmtName,
                  const char *sourceText,
                  CommandTag commandTag,
                  List *stmts,
                  CachedPlan *cplan)
{
    Assert(PortalIsValid(portal));
    Assert(portal->status == PORTAL_NEW);

    Assert(sourceText != NULL);
    Assert(commandTag != CMDTAG_UNKNOWN || stmts == NIL);

    portal->prepStmtName = prepStmtName;
    portal->sourceText = sourceText;
    portal->qc.commandTag = commandTag;
    portal->qc.nprocessed = 0;
    portal->commandTag = commandTag;
    portal->stmts = stmts;
    portal->cplan = cplan;
    portal->status = PORTAL_DEFINED;
}

/*
 * PortalReleaseCachedPlan
 *      Release a portal's reference to its cached plan, if any.
 */
static void
PortalReleaseCachedPlan(Portal portal)
{
    if (portal->cplan)
    {
        ReleaseCachedPlan(portal->cplan, NULL);
        portal->cplan = NULL;

        /*
         * We must also clear portal->stmts which is now a dangling reference
         * to the cached plan's plan list.  This protects any code that might
         * try to examine the Portal later.
         */
        portal->stmts = NIL;
    }
}

/*
 * PortalCreateHoldStore
 *      Create the tuplestore for a portal.
 */
void
PortalCreateHoldStore(Portal portal)
{
    MemoryContext oldcxt;

    Assert(portal->holdContext == NULL);
    Assert(portal->holdStore == NULL);
    Assert(portal->holdSnapshot == NULL);

    /*
     * Create the memory context that is used for storage of the tuple set.
     * Note this is NOT a child of the portal's portalContext.
     */
    portal->holdContext =
        AllocSetContextCreate(TopPortalContext,
                              "PortalHoldContext",
                              ALLOCSET_DEFAULT_SIZES);

    /*
     * Create the tuple store, selecting cross-transaction temp files, and
     * enabling random access only if cursor requires scrolling.
     *
     * XXX: Should maintenance_work_mem be used for the portal size?
     */
    oldcxt = MemoryContextSwitchTo(portal->holdContext);

    portal->holdStore =
        tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
                              true, work_mem);

    MemoryContextSwitchTo(oldcxt);
}
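
/*
 * Note that the hold store lives in holdContext, which hangs off
 * TopPortalContext rather than the portal's own portalContext.  Presumably
 * this is so the materialized data survives the reset of portalContext's
 * children at transaction abort (see AtAbort_Portals) and is only freed by
 * PortalDrop(), which deletes the two contexts separately.
 */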

/*
 * PinPortal
 *      Protect a portal from dropping.
 *
 * A pinned portal is still unpinned and dropped at transaction or
 * subtransaction abort.
 */
void
PinPortal(Portal portal)
{
    if (portal->portalPinned)
        elog(ERROR, "portal already pinned");

    portal->portalPinned = true;
}

void
UnpinPortal(Portal portal)
{
    if (!portal->portalPinned)
        elog(ERROR, "portal not pinned");

    portal->portalPinned = false;
}

/*
 * MarkPortalActive
 *      Transition a portal from READY to ACTIVE state.
 *
 * NOTE: never set portal->status = PORTAL_ACTIVE directly; call this instead.
 */
void
MarkPortalActive(Portal portal)
{
    /* For safety, this is a runtime test not just an Assert */
    if (portal->status != PORTAL_READY)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("portal \"%s\" cannot be run", portal->name)));
    /* Perform the state transition */
    portal->status = PORTAL_ACTIVE;
    portal->activeSubid = GetCurrentSubTransactionId();
}

/*
 * MarkPortalDone
 *      Transition a portal from ACTIVE to DONE state.
 *
 * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
 */
void
MarkPortalDone(Portal portal)
{
    /* Perform the state transition */
    Assert(portal->status == PORTAL_ACTIVE);
    portal->status = PORTAL_DONE;

    /*
     * Allow portalcmds.c to clean up the state it knows about.  We might as
     * well do that now, since the portal can't be executed any more.
     *
     * In some cases involving execution of a ROLLBACK command in an already
     * aborted transaction, this is necessary, or we'd reach AtCleanup_Portals
     * with the cleanup hook still unexecuted.
     */
    if (PointerIsValid(portal->cleanup))
    {
        portal->cleanup(portal);
        portal->cleanup = NULL;
    }
}

/*
 * MarkPortalFailed
 *      Transition a portal into FAILED state.
 *
 * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
 */
void
MarkPortalFailed(Portal portal)
{
    /* Perform the state transition */
    Assert(portal->status != PORTAL_DONE);
    portal->status = PORTAL_FAILED;

    /*
     * Allow portalcmds.c to clean up the state it knows about.  We might as
     * well do that now, since the portal can't be executed any more.
     *
     * In some cases involving cleanup of an already aborted transaction, this
     * is necessary, or we'd reach AtCleanup_Portals with the cleanup hook
     * still unexecuted.
     */
    if (PointerIsValid(portal->cleanup))
    {
        portal->cleanup(portal);
        portal->cleanup = NULL;
    }
}
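
/*
 * Summary of the portal state machine implemented above (PortalStart() lives
 * in pquery.c): a portal is born PORTAL_NEW in CreatePortal(), becomes
 * PORTAL_DEFINED in PortalDefineQuery(), PORTAL_READY in PortalStart(),
 * PORTAL_ACTIVE in MarkPortalActive() while a query runs, and then either
 * PORTAL_DONE via MarkPortalDone() or PORTAL_FAILED via MarkPortalFailed()
 * before PortalDrop() removes it.
 */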

/*
 * PortalDrop
 *      Destroy the portal.
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
    Assert(PortalIsValid(portal));

    /*
     * Don't allow dropping a pinned portal, it's still needed by whoever
     * pinned it.
     */
    if (portal->portalPinned)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_CURSOR_STATE),
                 errmsg("cannot drop pinned portal \"%s\"", portal->name)));

    /*
     * Not sure if the PORTAL_ACTIVE case can validly happen or not...
     */
    if (portal->status == PORTAL_ACTIVE)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_CURSOR_STATE),
                 errmsg("cannot drop active portal \"%s\"", portal->name)));

    /*
     * Allow portalcmds.c to clean up the state it knows about, in particular
     * shutting down the executor if still active.  This step potentially runs
     * user-defined code so failure has to be expected.  It's the cleanup
     * hook's responsibility to not try to do that more than once, in the case
     * that failure occurs and then we come back to drop the portal again
     * during transaction abort.
     *
     * Note: in most paths of control, this will have been done already in
     * MarkPortalDone or MarkPortalFailed.  We're just making sure.
     */
    if (PointerIsValid(portal->cleanup))
    {
        portal->cleanup(portal);
        portal->cleanup = NULL;
    }

    /* There shouldn't be an active snapshot anymore, except after error */
    Assert(portal->portalSnapshot == NULL || !isTopCommit);

    /*
     * Remove portal from hash table.  Because we do this here, we will not
     * come back to try to remove the portal again if there's any error in the
     * subsequent steps.  Better to leak a little memory than to get into an
     * infinite error-recovery loop.
     */
    PortalHashTableDelete(portal);

    /* drop cached plan reference, if any */
    PortalReleaseCachedPlan(portal);

    /*
     * If portal has a snapshot protecting its data, release that.  This needs
     * a little care since the registration will be attached to the portal's
     * resowner; if the portal failed, we will already have released the
     * resowner (and the snapshot) during transaction abort.
     */
    if (portal->holdSnapshot)
    {
        if (portal->resowner)
            UnregisterSnapshotFromOwner(portal->holdSnapshot,
                                        portal->resowner);
        portal->holdSnapshot = NULL;
    }

    /*
     * Release any resources still attached to the portal.  There are several
     * cases being covered here:
     *
     * Top transaction commit (indicated by isTopCommit): normally we should
     * do nothing here and let the regular end-of-transaction resource
     * releasing mechanism handle these resources too.  However, if we have a
     * FAILED portal (eg, a cursor that got an error), we'd better clean up
     * its resources to avoid resource-leakage warning messages.
     *
     * Sub transaction commit: never comes here at all, since we don't kill
     * any portals in AtSubCommit_Portals().
     *
     * Main or sub transaction abort: we will do nothing here because
     * portal->resowner was already set NULL; the resources were already
     * cleaned up in transaction abort.
     *
     * Ordinary portal drop: must release resources.  However, if the portal
     * is not FAILED then we do not release its locks.  The locks become the
     * responsibility of the transaction's ResourceOwner (since it is the
     * parent of the portal's owner) and will be released when the transaction
     * eventually ends.
     */
    if (portal->resowner &&
        (!isTopCommit || portal->status == PORTAL_FAILED))
    {
        bool        isCommit = (portal->status != PORTAL_FAILED);

        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_BEFORE_LOCKS,
                             isCommit, false);
        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_LOCKS,
                             isCommit, false);
        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_AFTER_LOCKS,
                             isCommit, false);
        ResourceOwnerDelete(portal->resowner);
    }
    portal->resowner = NULL;

    /*
     * Delete tuplestore if present.  We should do this even under error
     * conditions; since the tuplestore would have been using cross-
     * transaction storage, its temp files need to be explicitly deleted.
     */
    if (portal->holdStore)
    {
        MemoryContext oldcontext;

        oldcontext = MemoryContextSwitchTo(portal->holdContext);
        tuplestore_end(portal->holdStore);
        MemoryContextSwitchTo(oldcontext);
        portal->holdStore = NULL;
    }

    /* delete tuplestore storage, if any */
    if (portal->holdContext)
        MemoryContextDelete(portal->holdContext);

    /* release subsidiary storage */
    MemoryContextDelete(portal->portalContext);

    /* release portal struct (it's in TopPortalContext) */
    pfree(portal);
}

/*
 * Delete all declared cursors.
 *
 * Used by commands: CLOSE ALL, DISCARD ALL
 */
void
PortalHashTableDeleteAll(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    if (PortalHashTable == NULL)
        return;

    hash_seq_init(&status, PortalHashTable);
    while ((hentry = hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Can't close the active portal (the one running the command) */
        if (portal->status == PORTAL_ACTIVE)
            continue;

        PortalDrop(portal, false);

        /* Restart the iteration in case that led to other drops */
        hash_seq_term(&status);
        hash_seq_init(&status, PortalHashTable);
    }
}

/*
 * "Hold" a portal.  Prepare it for access by later transactions.
 */
static void
HoldPortal(Portal portal)
{
    /*
     * Note that PersistHoldablePortal() must release all resources used by
     * the portal that are local to the creating transaction.
     */
    PortalCreateHoldStore(portal);
    PersistHoldablePortal(portal);

    /* drop cached plan reference, if any */
    PortalReleaseCachedPlan(portal);

    /*
     * Any resources belonging to the portal will be released in the upcoming
     * transaction-wide cleanup; the portal will no longer have its own
     * resources.
     */
    portal->resowner = NULL;

    /*
     * Having successfully exported the holdable cursor, mark it as not
     * belonging to this transaction.
     */
    portal->createSubid = InvalidSubTransactionId;
    portal->activeSubid = InvalidSubTransactionId;
    portal->createLevel = 0;
}
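
/*
 * To "hold" a portal is therefore to materialize it: PersistHoldablePortal()
 * (in portalcmds.c) runs the portal's query to completion and stores the
 * result in portal->holdStore, after which fetches are served from the
 * tuplestore rather than the executor, and the portal no longer depends on
 * the transaction that created it.
 */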

/*
 * Pre-commit processing for portals.
 *
 * Holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Non-holdable portals created in this transaction are
 * simply removed.  Portals remaining from prior transactions should be
 * left untouched.
 *
 * Returns true if any portals changed state (possibly causing user-defined
 * code to be run), false if not.
 */
bool
PreCommit_Portals(bool isPrepare)
{
    bool        result = false;
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /*
         * There should be no pinned portals anymore.  Complain if someone
         * leaked one.  Auto-held portals are allowed; we assume that whoever
         * pinned them is managing them.
         */
        if (portal->portalPinned && !portal->autoHeld)
            elog(ERROR, "cannot commit while a portal is pinned");

        /*
         * Do not touch active portals --- this can only happen in the case of
         * a multi-transaction utility command, such as VACUUM, or a commit in
         * a procedure.
         *
         * Note however that any resource owner attached to such a portal is
         * still going to go away, so don't leave a dangling pointer.  Also
         * unregister any snapshots held by the portal, mainly to avoid
         * snapshot leak warnings from ResourceOwnerRelease().
         */
        if (portal->status == PORTAL_ACTIVE)
        {
            if (portal->holdSnapshot)
            {
                if (portal->resowner)
                    UnregisterSnapshotFromOwner(portal->holdSnapshot,
                                                portal->resowner);
                portal->holdSnapshot = NULL;
            }
            portal->resowner = NULL;
            /* Clear portalSnapshot too, for cleanliness */
            portal->portalSnapshot = NULL;
            continue;
        }

        /* Is it a holdable portal created in the current xact? */
        if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
            portal->createSubid != InvalidSubTransactionId &&
            portal->status == PORTAL_READY)
        {
            /*
             * We are exiting the transaction that created a holdable cursor.
             * Instead of dropping the portal, prepare it for access by later
             * transactions.
             *
             * However, if this is PREPARE TRANSACTION rather than COMMIT,
             * refuse PREPARE, because the semantics seem pretty unclear.
             */
            if (isPrepare)
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));

            HoldPortal(portal);

            /* Report we changed state */
            result = true;
        }
        else if (portal->createSubid == InvalidSubTransactionId)
        {
            /*
             * Do nothing to cursors held over from a previous transaction
             * (including ones we just froze in a previous cycle of this loop)
             */
            continue;
        }
        else
        {
            /* Zap all non-holdable portals */
            PortalDrop(portal, true);

            /* Report we changed state */
            result = true;
        }

        /*
         * After either freezing or dropping a portal, we have to restart the
         * iteration, because we could have invoked user-defined code that
         * caused a drop of the next portal in the hash chain.
         */
        hash_seq_term(&status);
        hash_seq_init(&status, PortalHashTable);
    }

    return result;
}

/*
 * Abort processing for portals.
 *
 * At this point we run the cleanup hook if present, but we can't release the
 * portal's memory until the cleanup call.
 */
void
AtAbort_Portals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /*
         * When elog(FATAL) is in progress, we need to set the active portal
         * to failed, so that PortalCleanup() doesn't run the executor
         * shutdown.
         */
        if (portal->status == PORTAL_ACTIVE && shmem_exit_inprogress)
            MarkPortalFailed(portal);

        /*
         * Do nothing else to cursors held over from a previous transaction.
         */
        if (portal->createSubid == InvalidSubTransactionId)
            continue;

        /*
         * Do nothing to auto-held cursors.  This is similar to the case of a
         * cursor from a previous transaction, but it could also be that the
         * cursor was auto-held in this transaction, so it wants to live on.
         */
        if (portal->autoHeld)
            continue;

        /*
         * If it was created in the current transaction, we can't do normal
         * shutdown on a READY portal either; it might refer to objects
         * created in the failed transaction.  See comments in
         * AtSubAbort_Portals.
         */
        if (portal->status == PORTAL_READY)
            MarkPortalFailed(portal);

        /*
         * Allow portalcmds.c to clean up the state it knows about, if we
         * haven't already.
         */
        if (PointerIsValid(portal->cleanup))
        {
            portal->cleanup(portal);
            portal->cleanup = NULL;
        }

        /* drop cached plan reference, if any */
        PortalReleaseCachedPlan(portal);

        /*
         * Any resources belonging to the portal will be released in the
         * upcoming transaction-wide cleanup; they will be gone before we run
         * PortalDrop.
         */
        portal->resowner = NULL;

        /*
         * Although we can't delete the portal data structure proper, we can
         * release any memory in subsidiary contexts, such as executor state.
         * The cleanup hook was the last thing that might have needed data
         * there.  But leave active portals alone.
         */
        if (portal->status != PORTAL_ACTIVE)
            MemoryContextDeleteChildren(portal->portalContext);
    }
}

/*
 * Post-abort cleanup for portals.
 *
 * Delete all portals not held over from prior transactions.
 */
void
AtCleanup_Portals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /*
         * Do not touch active portals --- this can only happen in the case of
         * a multi-transaction command.
         */
        if (portal->status == PORTAL_ACTIVE)
            continue;

        /*
         * Do nothing to cursors held over from a previous transaction or
         * auto-held ones.
         */
        if (portal->createSubid == InvalidSubTransactionId || portal->autoHeld)
        {
            Assert(portal->status != PORTAL_ACTIVE);
            Assert(portal->resowner == NULL);
            continue;
        }

        /*
         * If a portal is still pinned, forcibly unpin it.  PortalDrop will not
         * let us drop the portal otherwise.  Whoever pinned the portal was
         * interrupted by the abort too and won't try to use it anymore.
         */
        if (portal->portalPinned)
            portal->portalPinned = false;

        /*
         * We had better not call any user-defined code during cleanup, so if
         * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
         */
        if (PointerIsValid(portal->cleanup))
        {
            elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
            portal->cleanup = NULL;
        }

        /* Zap it. */
        PortalDrop(portal, false);
    }
}

/*
 * Portal-related cleanup when we return to the main loop on error.
 *
 * This is different from the cleanup at transaction abort.  Auto-held portals
 * are cleaned up on error but not on transaction abort.
 */
void
PortalErrorCleanup(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->autoHeld)
        {
            portal->portalPinned = false;
            PortalDrop(portal, false);
        }
    }
}

/*
 * Pre-subcommit processing for portals.
 *
 * Reassign portals created or used in the current subtransaction to the
 * parent subtransaction.
 */
void
AtSubCommit_Portals(SubTransactionId mySubid,
                    SubTransactionId parentSubid,
                    int parentLevel,
                    ResourceOwner parentXactOwner)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->createSubid == mySubid)
        {
            portal->createSubid = parentSubid;
            portal->createLevel = parentLevel;
            if (portal->resowner)
                ResourceOwnerNewParent(portal->resowner, parentXactOwner);
        }
        if (portal->activeSubid == mySubid)
            portal->activeSubid = parentSubid;
    }
}

/*
 * Subtransaction abort handling for portals.
 *
 * Deactivate portals created or used during the failed subtransaction.
 * Note that per AtSubCommit_Portals, this will catch portals created/used
 * in descendants of the subtransaction too.
 *
 * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
 */
void
AtSubAbort_Portals(SubTransactionId mySubid,
                   SubTransactionId parentSubid,
                   ResourceOwner myXactOwner,
                   ResourceOwner parentXactOwner)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Was it created in this subtransaction? */
        if (portal->createSubid != mySubid)
        {
            /* No, but maybe it was used in this subtransaction? */
            if (portal->activeSubid == mySubid)
            {
                /* Maintain activeSubid until the portal is removed */
                portal->activeSubid = parentSubid;

                /*
                 * A MarkPortalActive() caller ran an upper-level portal in
                 * this subtransaction and left the portal ACTIVE.  This can't
                 * happen, but force the portal into FAILED state for the same
                 * reasons discussed below.
                 *
                 * We assume we can get away without forcing upper-level READY
                 * portals to fail, even if they were run and then suspended.
                 * In theory a suspended upper-level portal could have
                 * acquired some references to objects that are about to be
                 * destroyed, but there should be sufficient defenses against
                 * such cases: the portal's original query cannot contain such
                 * references, and any references within, say, cached plans of
                 * PL/pgSQL functions are not from active queries and should
                 * be protected by revalidation logic.
                 */
                if (portal->status == PORTAL_ACTIVE)
                    MarkPortalFailed(portal);

                /*
                 * Also, if we failed it during the current subtransaction
                 * (either just above, or earlier), reattach its resource
                 * owner to the current subtransaction's resource owner, so
                 * that any resources it still holds will be released while
                 * cleaning up this subtransaction.  This prevents some corner
                 * cases wherein we might get Asserts or worse while cleaning
                 * up objects created during the current subtransaction
                 * (because they're still referenced within this portal).
                 */
                if (portal->status == PORTAL_FAILED && portal->resowner)
                {
                    ResourceOwnerNewParent(portal->resowner, myXactOwner);
                    portal->resowner = NULL;
                }
            }
            /* Done if it wasn't created in this subtransaction */
            continue;
        }

        /*
         * Force any live portals of my own subtransaction into FAILED state.
         * We have to do this because they might refer to objects created or
         * changed in the failed subtransaction, leading to crashes within
         * ExecutorEnd when portalcmds.c tries to close down the portal.
         * Currently, every MarkPortalActive() caller ensures it updates the
         * portal status again before relinquishing control, so ACTIVE can't
         * happen here.  If it does happen, dispose the portal like existing
         * MarkPortalActive() callers would.
         */
        if (portal->status == PORTAL_READY ||
            portal->status == PORTAL_ACTIVE)
            MarkPortalFailed(portal);

        /*
         * Allow portalcmds.c to clean up the state it knows about, if we
         * haven't already.
         */
        if (PointerIsValid(portal->cleanup))
        {
            portal->cleanup(portal);
            portal->cleanup = NULL;
        }

        /* drop cached plan reference, if any */
        PortalReleaseCachedPlan(portal);

        /*
         * Any resources belonging to the portal will be released in the
         * upcoming transaction-wide cleanup; they will be gone before we run
         * PortalDrop.
         */
        portal->resowner = NULL;

        /*
         * Although we can't delete the portal data structure proper, we can
         * release any memory in subsidiary contexts, such as executor state.
         * The cleanup hook was the last thing that might have needed data
         * there.
         */
        MemoryContextDeleteChildren(portal->portalContext);
    }
}

/*
 * Post-subabort cleanup for portals.
 *
 * Drop all portals created in the failed subtransaction (but note that
 * we will not drop any that were reassigned to the parent above).
 */
void
AtSubCleanup_Portals(SubTransactionId mySubid)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->createSubid != mySubid)
            continue;

        /*
         * If a portal is still pinned, forcibly unpin it.  PortalDrop will not
         * let us drop the portal otherwise.  Whoever pinned the portal was
         * interrupted by the abort too and won't try to use it anymore.
         */
        if (portal->portalPinned)
            portal->portalPinned = false;

        /*
         * We had better not call any user-defined code during cleanup, so if
         * the cleanup hook hasn't been run yet, too bad; we'll just skip it.
         */
        if (PointerIsValid(portal->cleanup))
        {
            elog(WARNING, "skipping cleanup for portal \"%s\"", portal->name);
            portal->cleanup = NULL;
        }

        /* Zap it. */
        PortalDrop(portal, false);
    }
}

/* Find all available cursors */
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    HASH_SEQ_STATUS hash_seq;
    PortalHashEnt *hentry;

    /*
     * We put all the tuples into a tuplestore in one scan of the hashtable.
     * This avoids any issue of the hashtable possibly changing between calls.
     */
    InitMaterializedSRF(fcinfo, 0);

    hash_seq_init(&hash_seq, PortalHashTable);
    while ((hentry = hash_seq_search(&hash_seq)) != NULL)
    {
        Portal      portal = hentry->portal;
        Datum       values[6];
        bool        nulls[6] = {0};

        /* report only "visible" entries */
        if (!portal->visible)
            continue;

        values[0] = CStringGetTextDatum(portal->name);
        values[1] = CStringGetTextDatum(portal->sourceText);
        values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
        values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
        values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
        values[5] = TimestampTzGetDatum(portal->creation_time);

        tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
    }

    return (Datum) 0;
}

bool
ThereAreNoReadyPortals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->status == PORTAL_READY)
            return false;
    }

    return true;
}

/*
 * Hold all pinned portals.
 *
 * When initiating a COMMIT or ROLLBACK inside a procedure, this must be
 * called to protect internally-generated cursors from being dropped during
 * the transaction shutdown.  Currently, SPI calls this automatically; PLs
 * that initiate COMMIT or ROLLBACK some other way are on the hook to do it
 * themselves.  (Note that we couldn't do this in, say, AtAbort_Portals
 * because we need to run user-defined code while persisting a portal.
 * It's too late to do that once transaction abort has started.)
 *
 * We protect such portals by converting them to held cursors.  We mark them
 * as "auto-held" so that exception exit knows to clean them up.  (In normal,
 * non-exception code paths, the PL needs to clean such portals itself, since
 * transaction end won't do it anymore; but that should be normal practice
 * anyway.)
 */
void
HoldPinnedPortals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->portalPinned && !portal->autoHeld)
        {
            /*
             * Doing transaction control, especially abort, inside a cursor
             * loop that is not read-only, for example using UPDATE ...
             * RETURNING, has weird semantics issues.  Also, this
             * implementation wouldn't work, because such portals cannot be
             * held.  (The core grammar enforces that only SELECT statements
             * can drive a cursor, but for example PL/pgSQL does not restrict
             * it.)
             */
            if (portal->strategy != PORTAL_ONE_SELECT)
                ereport(ERROR,
                        (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                         errmsg("cannot perform transaction commands inside a cursor loop that is not read-only")));

            /* Verify it's in a suitable state to be held */
            if (portal->status != PORTAL_READY)
                elog(ERROR, "pinned portal is not ready to be auto-held");

            HoldPortal(portal);
            portal->autoHeld = true;
        }
    }
}

/*
 * Drop the outer active snapshots for all portals, so that no snapshots
 * remain active.
 *
 * Like HoldPinnedPortals, this must be called when initiating a COMMIT or
 * ROLLBACK inside a procedure.  This has to be separate from that since it
 * should not be run until we're done with steps that are likely to fail.
 *
 * It's tempting to fold this into PreCommit_Portals, but to do so, we'd
 * need to clean up snapshot management in VACUUM and perhaps other places.
 */
void
ForgetPortalSnapshots(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;
    int         numPortalSnaps = 0;
    int         numActiveSnaps = 0;

    /* First, scan PortalHashTable and clear portalSnapshot fields */
    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        if (portal->portalSnapshot != NULL)
        {
            portal->portalSnapshot = NULL;
            numPortalSnaps++;
        }
        /* portal->holdSnapshot will be cleaned up in PreCommit_Portals */
    }

    /*
     * Now, pop all the active snapshots, which should be just those that were
     * portal snapshots.  Ideally we'd drive this directly off the portal
     * scan, but there's no good way to visit the portals in the correct
     * order.  So just cross-check after the fact.
     */
    while (ActiveSnapshotSet())
    {
        PopActiveSnapshot();
        numActiveSnaps++;
    }

    if (numPortalSnaps != numActiveSnaps)
        elog(ERROR, "portal snapshots (%d) did not account for all active snapshots (%d)",
             numPortalSnaps, numActiveSnaps);
}