Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * proc.c
4 : : * routines to manage per-process shared memory data structure
5 : : *
6 : : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/storage/lmgr/proc.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : : /*
16 : : * Interface (a):
17 : : * ProcSleep(), ProcWakeup(),
18 : : *
19 : : * Waiting for a lock causes the backend to be put to sleep. Whoever releases
20 : : * the lock wakes the process up again (and gives it an error code so it knows
21 : : * whether it was awoken on an error condition).
22 : : *
23 : : * Interface (b):
24 : : *
25 : : * ProcReleaseLocks -- frees the locks associated with current transaction
26 : : *
27 : : * ProcKill -- destroys the shared memory state (and locks)
28 : : * associated with the process.
29 : : */
30 : : #include "postgres.h"
31 : :
32 : : #include <signal.h>
33 : : #include <unistd.h>
34 : : #include <sys/time.h>
35 : :
36 : : #include "access/transam.h"
37 : : #include "access/twophase.h"
38 : : #include "access/xlogutils.h"
39 : : #include "miscadmin.h"
40 : : #include "pgstat.h"
41 : : #include "postmaster/autovacuum.h"
42 : : #include "replication/slotsync.h"
43 : : #include "replication/syncrep.h"
44 : : #include "storage/condition_variable.h"
45 : : #include "storage/ipc.h"
46 : : #include "storage/lmgr.h"
47 : : #include "storage/pmsignal.h"
48 : : #include "storage/proc.h"
49 : : #include "storage/procarray.h"
50 : : #include "storage/procsignal.h"
51 : : #include "storage/spin.h"
52 : : #include "storage/standby.h"
53 : : #include "utils/timeout.h"
54 : : #include "utils/timestamp.h"
55 : :
56 : : /* GUC variables */
57 : : int DeadlockTimeout = 1000;
58 : : int StatementTimeout = 0;
59 : : int LockTimeout = 0;
60 : : int IdleInTransactionSessionTimeout = 0;
61 : : int TransactionTimeout = 0;
62 : : int IdleSessionTimeout = 0;
63 : : bool log_lock_waits = false;
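These variables back user-visible GUCs whose spellings differ from the C identifiers. A sketch of the correspondence as we understand the settings (timeouts in milliseconds; 0 disables):

/*
 * DeadlockTimeout                 <-> deadlock_timeout (default 1000)
 * StatementTimeout                <-> statement_timeout
 * LockTimeout                     <-> lock_timeout
 * IdleInTransactionSessionTimeout <-> idle_in_transaction_session_timeout
 * TransactionTimeout              <-> transaction_timeout
 * IdleSessionTimeout              <-> idle_session_timeout
 * log_lock_waits                  <-> log_lock_waits (boolean, default off)
 */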
64 : :
65 : : /* Pointer to this process's PGPROC struct, if any */
66 : : PGPROC *MyProc = NULL;
67 : :
68 : : /*
69 : : * This spinlock protects the freelist of recycled PGPROC structures.
70 : : * We cannot use an LWLock because the LWLock manager depends on already
71 : : * having a PGPROC and a wait semaphore! But these structures are touched
72 : : * relatively infrequently (only at backend startup or shutdown) and not for
73 : : * very long, so a spinlock is okay.
74 : : */
75 : : NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
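The discipline the comment above asks for looks like the freelist pop in InitProcess() below: keep the critical section short and straight-line, since spinlocks have no deadlock detection and code must not ereport() while holding one. A minimal sketch of the pattern (procgloballist and MyProc are the names used further down in this file):

    SpinLockAcquire(ProcStructLock);
    if (!dlist_is_empty(procgloballist))
        MyProc = (PGPROC *) dlist_pop_head_node(procgloballist);
    SpinLockRelease(ProcStructLock);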
76 : :
77 : : /* Pointers to shared-memory structures */
78 : : PROC_HDR *ProcGlobal = NULL;
79 : : NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
80 : : PGPROC *PreparedXactProcs = NULL;
81 : :
82 : : /* If we are waiting for a lock, this points to the associated LOCALLOCK */
83 : : static LOCALLOCK *lockAwaited = NULL;
84 : :
85 : : static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
86 : :
87 : : /* Is a deadlock check pending? */
88 : : static volatile sig_atomic_t got_deadlock_timeout;
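This follows the usual signal-flag idiom: the DEADLOCK_TIMEOUT handler (defined later in this file, outside this excerpt) may only do async-signal-safe work, so it just sets the flag and the process latch, and ProcSleep() inspects the flag from its wait loop. A sketch of that handler shape, with a hypothetical name:

    static void
    deadlock_timeout_handler(void)
    {
        got_deadlock_timeout = true;    /* sig_atomic_t write: handler-safe */
        SetLatch(MyLatch);              /* wake the WaitLatch() loop */
    }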
89 : :
90 : : static void RemoveProcFromArray(int code, Datum arg);
91 : : static void ProcKill(int code, Datum arg);
92 : : static void AuxiliaryProcKill(int code, Datum arg);
93 : : static void CheckDeadLock(void);
94 : :
95 : :
96 : : /*
97 : : * Report shared-memory space needed by InitProcGlobal.
98 : : */
99 : : Size
6876 tgl@sss.pgh.pa.us 100 :CBC 1679 : ProcGlobalShmemSize(void)
101 : : {
6812 102 : 1679 : Size size = 0;
103 : : Size TotalProcs =
331 104 : 1679 : add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
105 : :
106 : : /* ProcGlobal */
6812 107 : 1679 : size = add_size(size, sizeof(PROC_HDR));
1339 andres@anarazel.de 108 : 1679 : size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
6812 tgl@sss.pgh.pa.us 109 : 1679 : size = add_size(size, sizeof(slock_t));
110 : :
1339 andres@anarazel.de 111 : 1679 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
112 : 1679 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
1245 alvherre@alvh.no-ip. 113 : 1679 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
114 : :
7137 tgl@sss.pgh.pa.us 115 : 1679 : return size;
116 : : }
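add_size() and mul_size() are the overflow-checked Size arithmetic helpers; they ereport(ERROR) on overflow rather than silently wrapping, which matters because the multipliers here are user-settable GUCs. A minimal sketch of the same pattern in a hypothetical consumer (MyModuleSlot is illustrative, not part of this file):

    Size
    MyModuleShmemSize(void)
    {
        Size        size = 0;

        size = add_size(size, mul_size(MaxBackends, sizeof(MyModuleSlot)));
        return size;
    }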
117 : :
118 : : /*
119 : : * Report number of semaphores needed by InitProcGlobal.
120 : : */
121 : : int
6876 122 : 1679 : ProcGlobalSemas(void)
123 : : {
124 : : /*
125 : : * We need a sema per backend (including autovacuum), plus one for each
126 : : * auxiliary process.
127 : : */
733 rhaas@postgresql.org 128 : 1679 : return MaxBackends + NUM_AUXILIARY_PROCS;
129 : : }
130 : :
131 : : /*
132 : : * InitProcGlobal -
133 : : * Initialize the global process table during postmaster or standalone
134 : : * backend startup.
135 : : *
136 : : * We also create all the per-process semaphores we will need to support
137 : : * the requested number of backends. We used to allocate semaphores
138 : : * only when backends were actually started up, but that is bad because
139 : : * it lets Postgres fail under load --- a lot of Unix systems are
140 : : * (mis)configured with small limits on the number of semaphores, and
141 : : * running out when trying to start another backend is a common failure.
142 : : * So, now we grab enough semaphores to support the desired max number
143 : : * of backends immediately at initialization --- if the sysadmin has set
144 : : * MaxConnections, max_worker_processes, max_wal_senders, or
145 : : * autovacuum_max_workers higher than his kernel will support, he'll
146 : : * find out sooner rather than later.
147 : : *
148 : : * Another reason for creating semaphores here is that the semaphore
149 : : * implementation typically requires us to create semaphores in the
150 : : * postmaster, not in backends.
151 : : *
152 : : * Note: this is NOT called by individual backends under a postmaster,
153 : : * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
154 : : * pointers must be propagated specially for EXEC_BACKEND operation.
155 : : */
156 : : void
6876 tgl@sss.pgh.pa.us 157 : 898 : InitProcGlobal(void)
158 : : {
159 : : PGPROC *procs;
160 : : int i,
161 : : j;
162 : : bool found;
733 rhaas@postgresql.org 163 : 898 : uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
164 : :
165 : : /* Create the ProcGlobal shared structure */
9716 bruce@momjian.us 166 : 898 : ProcGlobal = (PROC_HDR *)
6675 tgl@sss.pgh.pa.us 167 : 898 : ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
168 [ - + ]: 898 : Assert(!found);
169 : :
170 : : /*
171 : : * Initialize the data structures.
172 : : */
4690 rhaas@postgresql.org 173 : 898 : ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
452 andres@anarazel.de 174 : 898 : dlist_init(&ProcGlobal->freeProcs);
175 : 898 : dlist_init(&ProcGlobal->autovacFreeProcs);
176 : 898 : dlist_init(&ProcGlobal->bgworkerFreeProcs);
177 : 898 : dlist_init(&ProcGlobal->walsenderFreeProcs);
4639 tgl@sss.pgh.pa.us 178 : 898 : ProcGlobal->startupBufferPinWaitBufId = -1;
4359 179 : 898 : ProcGlobal->walwriterLatch = NULL;
180 : 898 : ProcGlobal->checkpointerLatch = NULL;
42 heikki.linnakangas@i 181 :GNC 898 : pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PROC_NUMBER);
182 : 898 : pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PROC_NUMBER);
183 : :
184 : : /*
185 : : * Create and initialize all the PGPROC structures we'll need. There are
186 : : * five separate consumers: (1) normal backends, (2) autovacuum workers
187 : : * and the autovacuum launcher, (3) background workers, (4) auxiliary
188 : : * processes, and (5) prepared transactions. Each PGPROC structure is
189 : : * dedicated to exactly one of these purposes, and they do not move
190 : : * between groups.
191 : : */
4690 rhaas@postgresql.org 192 :CBC 898 : procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
2782 tgl@sss.pgh.pa.us 193 [ + - + - + - - + - - ]: 898 : MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
4705 rhaas@postgresql.org 194 : 898 : ProcGlobal->allProcs = procs;
195 : : /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
733 196 : 898 : ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
197 : :
198 : : /*
199 : : * Allocate arrays mirroring PGPROC fields in a dense manner. See
200 : : * PROC_HDR.
201 : : *
202 : : * XXX: It might make sense to increase padding for these arrays, given
203 : : * how hotly they are accessed.
204 : : */
1339 andres@anarazel.de 205 : 1796 : ProcGlobal->xids =
206 : 898 : (TransactionId *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->xids));
207 [ + - + + + - + - + + ]: 38646 : MemSet(ProcGlobal->xids, 0, TotalProcs * sizeof(*ProcGlobal->xids));
208 : 898 : ProcGlobal->subxidStates = (XidCacheStatus *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->subxidStates));
209 [ + - + + + - + - + + ]: 7262 : MemSet(ProcGlobal->subxidStates, 0, TotalProcs * sizeof(*ProcGlobal->subxidStates));
1245 alvherre@alvh.no-ip. 210 : 898 : ProcGlobal->statusFlags = (uint8 *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->statusFlags));
211 [ + - + + + - + - + + ]: 4044 : MemSet(ProcGlobal->statusFlags, 0, TotalProcs * sizeof(*ProcGlobal->statusFlags));
212 : :
4690 rhaas@postgresql.org 213 [ + + ]: 81509 : for (i = 0; i < TotalProcs; i++)
214 : : {
452 andres@anarazel.de 215 : 80611 : PGPROC *proc = &procs[i];
216 : :
217 : : /* Common initialization for all PGPROCs, regardless of type. */
218 : :
219 : : /*
220 : : * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
221 : : * dummy PGPROCs don't need these though - they're never associated
222 : : * with a real process
223 : : */
733 rhaas@postgresql.org 224 [ + + ]: 80611 : if (i < MaxBackends + NUM_AUXILIARY_PROCS)
225 : : {
452 andres@anarazel.de 226 : 79822 : proc->sem = PGSemaphoreCreate();
227 : 79822 : InitSharedLatch(&(proc->procLatch));
228 : 79822 : LWLockInitialize(&(proc->fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
229 : : }
230 : :
231 : : /*
232 : : * Newly created PGPROCs for normal backends, autovacuum and bgworkers
233 : : * must be queued up on the appropriate free list. Because there can
234 : : * only ever be a small, fixed number of auxiliary processes, no free
235 : : * list is used in that case; InitAuxiliaryProcess() instead uses a
236 : : * linear search. PGPROCs for prepared transactions are added to a
237 : : * free list by TwoPhaseShmemInit().
238 : : */
4690 rhaas@postgresql.org 239 [ + + ]: 80611 : if (i < MaxConnections)
240 : : {
241 : : /* PGPROC for normal backend, add to freeProcs list */
42 heikki.linnakangas@i 242 :GNC 57700 : dlist_push_tail(&ProcGlobal->freeProcs, &proc->links);
452 andres@anarazel.de 243 :CBC 57700 : proc->procgloballist = &ProcGlobal->freeProcs;
244 : : }
4147 alvherre@alvh.no-ip. 245 [ + + ]: 22911 : else if (i < MaxConnections + autovacuum_max_workers + 1)
246 : : {
247 : : /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
42 heikki.linnakangas@i 248 :GNC 3588 : dlist_push_tail(&ProcGlobal->autovacFreeProcs, &proc->links);
452 andres@anarazel.de 249 :CBC 3588 : proc->procgloballist = &ProcGlobal->autovacFreeProcs;
250 : : }
1888 michael@paquier.xyz 251 [ + + ]: 19323 : else if (i < MaxConnections + autovacuum_max_workers + 1 + max_worker_processes)
252 : : {
253 : : /* PGPROC for bgworker, add to bgworkerFreeProcs list */
42 heikki.linnakangas@i 254 :GNC 7196 : dlist_push_tail(&ProcGlobal->bgworkerFreeProcs, &proc->links);
452 andres@anarazel.de 255 :CBC 7196 : proc->procgloballist = &ProcGlobal->bgworkerFreeProcs;
256 : : }
733 rhaas@postgresql.org 257 [ + + ]: 12127 : else if (i < MaxBackends)
258 : : {
259 : : /* PGPROC for walsender, add to walsenderFreeProcs list */
42 heikki.linnakangas@i 260 :GNC 5950 : dlist_push_tail(&ProcGlobal->walsenderFreeProcs, &proc->links);
452 andres@anarazel.de 261 :CBC 5950 : proc->procgloballist = &ProcGlobal->walsenderFreeProcs;
262 : : }
263 : :
264 : : /* Initialize myProcLocks[] shared memory queues. */
4548 rhaas@postgresql.org 265 [ + + ]: 1370387 : for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
452 andres@anarazel.de 266 : 1289776 : dlist_init(&(proc->myProcLocks[j]));
267 : :
268 : : /* Initialize lockGroupMembers list. */
269 : 80611 : dlist_init(&proc->lockGroupMembers);
270 : :
271 : : /*
272 : : * Initialize the atomic variables, otherwise, it won't be safe to
273 : : * access them for backends that aren't currently in use.
274 : : */
42 heikki.linnakangas@i 275 :GNC 80611 : pg_atomic_init_u32(&(proc->procArrayGroupNext), INVALID_PROC_NUMBER);
276 : 80611 : pg_atomic_init_u32(&(proc->clogGroupNext), INVALID_PROC_NUMBER);
452 andres@anarazel.de 277 :CBC 80611 : pg_atomic_init_u64(&(proc->waitStart), 0);
278 : : }
279 : :
280 : : /*
281 : : * Save pointers to the blocks of PGPROC structures reserved for auxiliary
282 : : * processes and prepared transactions.
283 : : */
733 rhaas@postgresql.org 284 : 898 : AuxiliaryProcs = &procs[MaxBackends];
285 : 898 : PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
286 : :
287 : : /* Create ProcStructLock spinlock, too */
6675 tgl@sss.pgh.pa.us 288 : 898 : ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
289 : 898 : SpinLockInit(ProcStructLock);
10141 scrappy@hub.org 290 : 898 : }
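A worked example of the resulting array layout, using illustrative (non-default) settings; the ranges follow from the branch conditions in the loop above, which imply MaxBackends = MaxConnections + (autovacuum_max_workers + 1) + max_worker_processes + max_wal_senders:

    /*
     * MaxConnections = 100, autovacuum_max_workers = 3,
     * max_worker_processes = 8, max_wal_senders = 10
     *     => MaxBackends = 100 + 4 + 8 + 10 = 122
     *
     * procs[0..99]     freeProcs          (normal backends)
     * procs[100..103]  autovacFreeProcs   (workers + launcher)
     * procs[104..111]  bgworkerFreeProcs
     * procs[112..121]  walsenderFreeProcs
     * procs[122..]     AuxiliaryProcs     (NUM_AUXILIARY_PROCS, no freelist)
     * then             PreparedXactProcs  (max_prepared_xacts, listed later
     *                                      by TwoPhaseShmemInit())
     */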
291 : :
292 : : /*
293 : : * InitProcess -- initialize a per-process PGPROC entry for this backend
294 : : */
295 : : void
8538 tgl@sss.pgh.pa.us 296 : 16356 : InitProcess(void)
297 : : {
298 : : dlist_head *procgloballist;
299 : :
300 : : /*
301 : : * ProcGlobal should be set up already (if we are a backend, we inherit
302 : : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
303 : : */
3103 rhaas@postgresql.org 304 [ - + ]: 16356 : if (ProcGlobal == NULL)
7570 tgl@sss.pgh.pa.us 305 [ # # ]:UBC 0 : elog(PANIC, "proc header uninitialized");
306 : :
8255 tgl@sss.pgh.pa.us 307 [ - + ]:CBC 16356 : if (MyProc != NULL)
7570 tgl@sss.pgh.pa.us 308 [ # # ]:UBC 0 : elog(ERROR, "you already exist");
309 : :
310 : : /* Decide which list should supply our PGPROC. */
41 heikki.linnakangas@i 311 [ + + + + ]:GNC 16356 : if (AmAutoVacuumLauncherProcess() || AmAutoVacuumWorkerProcess())
3103 rhaas@postgresql.org 312 :CBC 2620 : procgloballist = &ProcGlobal->autovacFreeProcs;
41 heikki.linnakangas@i 313 [ + + ]:GNC 13736 : else if (AmBackgroundWorkerProcess())
3103 rhaas@postgresql.org 314 :CBC 2412 : procgloballist = &ProcGlobal->bgworkerFreeProcs;
41 heikki.linnakangas@i 315 [ + + ]:GNC 11324 : else if (AmWalSenderProcess())
1888 michael@paquier.xyz 316 :CBC 1031 : procgloballist = &ProcGlobal->walsenderFreeProcs;
317 : : else
3103 rhaas@postgresql.org 318 : 10293 : procgloballist = &ProcGlobal->freeProcs;
319 : :
320 : : /*
321 : : * Try to get a proc struct from the appropriate free list. If this
322 : : * fails, we must be out of PGPROC structures (not to mention semaphores).
323 : : *
324 : : * While we are holding the ProcStructLock, also copy the current shared
325 : : * estimate of spins_per_delay to local storage.
326 : : */
8233 tgl@sss.pgh.pa.us 327 [ + + ]: 16356 : SpinLockAcquire(ProcStructLock);
328 : :
3103 rhaas@postgresql.org 329 : 16356 : set_spins_per_delay(ProcGlobal->spins_per_delay);
330 : :
452 andres@anarazel.de 331 [ + + ]: 16356 : if (!dlist_is_empty(procgloballist))
332 : : {
331 tgl@sss.pgh.pa.us 333 : 16354 : MyProc = (PGPROC *) dlist_pop_head_node(procgloballist);
8233 334 : 16354 : SpinLockRelease(ProcStructLock);
335 : : }
336 : : else
337 : : {
338 : : /*
339 : : * If we reach here, all the PGPROCs are in use. This is one of the
340 : : * possible places to detect "too many backends", so give the standard
341 : : * error message. XXX do we need to give a different failure message
342 : : * in the autovacuum case?
343 : : */
344 : 2 : SpinLockRelease(ProcStructLock);
41 heikki.linnakangas@i 345 [ + - ]:GNC 2 : if (AmWalSenderProcess())
1888 michael@paquier.xyz 346 [ + - ]:CBC 2 : ereport(FATAL,
347 : : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
348 : : errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
349 : : max_wal_senders)));
7570 tgl@sss.pgh.pa.us 350 [ # # ]:UBC 0 : ereport(FATAL,
351 : : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
352 : : errmsg("sorry, too many clients already")));
353 : : }
52 heikki.linnakangas@i 354 :GNC 16354 : MyProcNumber = GetNumberFromPGProc(MyProc);
355 : :
356 : : /*
357 : : * Cross-check that the PGPROC is of the type we expect; if this were not
358 : : * the case, it would get returned to the wrong list.
359 : : */
3183 rhaas@postgresql.org 360 [ - + ]:CBC 16354 : Assert(MyProc->procgloballist == procgloballist);
361 : :
362 : : /*
363 : : * Now that we have a PGPROC, mark ourselves as an active postmaster
364 : : * child; this is so that the postmaster can detect it if we exit without
365 : : * cleaning up. (XXX autovac launcher currently doesn't participate in
366 : : * this; it probably should.)
367 : : *
368 : : * The slot sync worker also does not participate in it; see comments atop
369 : : * 'struct bkend' in postmaster.c.
370 : : */
41 heikki.linnakangas@i 371 [ + + + + ]:GNC 16354 : if (IsUnderPostmaster && !AmAutoVacuumLauncherProcess() &&
372 [ + + ]: 14397 : !AmLogicalSlotSyncWorkerProcess())
4983 tgl@sss.pgh.pa.us 373 :CBC 14393 : MarkPostmasterChildActive();
374 : :
375 : : /*
376 : : * Initialize all fields of MyProc, except for those previously
377 : : * initialized by InitProcGlobal.
378 : : */
452 andres@anarazel.de 379 : 16354 : dlist_node_init(&MyProc->links);
1397 peter@eisentraut.org 380 : 16354 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
4154 simon@2ndQuadrant.co 381 : 16354 : MyProc->fpVXIDLock = false;
382 : 16354 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
1339 andres@anarazel.de 383 : 16354 : MyProc->xid = InvalidTransactionId;
1340 384 : 16354 : MyProc->xmin = InvalidTransactionId;
8233 tgl@sss.pgh.pa.us 385 : 16354 : MyProc->pid = MyProcPid;
42 heikki.linnakangas@i 386 :GNC 16354 : MyProc->vxid.procNumber = MyProcNumber;
387 : 16354 : MyProc->vxid.lxid = InvalidLocalTransactionId;
388 : : /* databaseId and roleId will be filled in later */
6675 tgl@sss.pgh.pa.us 389 :CBC 16354 : MyProc->databaseId = InvalidOid;
6832 390 : 16354 : MyProc->roleId = InvalidOid;
2071 michael@paquier.xyz 391 : 16354 : MyProc->tempNamespaceId = InvalidOid;
41 heikki.linnakangas@i 392 :GNC 16354 : MyProc->isBackgroundWorker = AmBackgroundWorkerProcess();
737 rhaas@postgresql.org 393 :CBC 16354 : MyProc->delayChkptFlags = 0;
1245 alvherre@alvh.no-ip. 394 : 16354 : MyProc->statusFlags = 0;
395 : : /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
41 heikki.linnakangas@i 396 [ + + ]:GNC 16354 : if (AmAutoVacuumWorkerProcess())
1245 alvherre@alvh.no-ip. 397 :CBC 750 : MyProc->statusFlags |= PROC_IS_AUTOVACUUM;
511 andres@anarazel.de 398 : 16354 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
4458 heikki.linnakangas@i 399 : 16354 : MyProc->lwWaitMode = 0;
8483 tgl@sss.pgh.pa.us 400 : 16354 : MyProc->waitLock = NULL;
7170 401 : 16354 : MyProc->waitProcLock = NULL;
1147 fujii@postgresql.org 402 : 16354 : pg_atomic_write_u64(&MyProc->waitStart, 0);
403 : : #ifdef USE_ASSERT_CHECKING
404 : : {
405 : : int i;
406 : :
407 : : /* Last process should have released all locks. */
4548 rhaas@postgresql.org 408 [ + + ]: 278018 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
452 andres@anarazel.de 409 [ - + ]: 261664 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
410 : : }
411 : : #endif
5202 simon@2ndQuadrant.co 412 : 16354 : MyProc->recoveryConflictPending = false;
413 : :
414 : : /* Initialize fields for sync rep */
4312 heikki.linnakangas@i 415 : 16354 : MyProc->waitLSN = 0;
4788 simon@2ndQuadrant.co 416 : 16354 : MyProc->syncRepState = SYNC_REP_NOT_WAITING;
452 andres@anarazel.de 417 : 16354 : dlist_node_init(&MyProc->syncRepLinks);
418 : :
419 : : /* Initialize fields for group XID clearing. */
2985 rhaas@postgresql.org 420 : 16354 : MyProc->procArrayGroupMember = false;
421 : 16354 : MyProc->procArrayGroupMemberXid = InvalidTransactionId;
42 heikki.linnakangas@i 422 [ - + ]:GNC 16354 : Assert(pg_atomic_read_u32(&MyProc->procArrayGroupNext) == INVALID_PROC_NUMBER);
423 : :
424 : : /* Check that group locking fields are in a proper initial state. */
2989 rhaas@postgresql.org 425 [ - + ]:CBC 16354 : Assert(MyProc->lockGroupLeader == NULL);
426 [ - + ]: 16354 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
427 : :
428 : : /* Initialize wait event information. */
2957 429 : 16354 : MyProc->wait_event_info = 0;
430 : :
431 : : /* Initialize fields for group transaction status update. */
2417 432 : 16354 : MyProc->clogGroupMember = false;
433 : 16354 : MyProc->clogGroupMemberXid = InvalidTransactionId;
434 : 16354 : MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
435 : 16354 : MyProc->clogGroupMemberPage = -1;
436 : 16354 : MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
42 heikki.linnakangas@i 437 [ - + ]:GNC 16354 : Assert(pg_atomic_read_u32(&MyProc->clogGroupNext) == INVALID_PROC_NUMBER);
438 : :
439 : : /*
440 : : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
441 : : * on it. That allows us to repoint the process latch, which so far
442 : : * points to the process-local one, to the shared one.
443 : : */
4631 tgl@sss.pgh.pa.us 444 :CBC 16354 : OwnLatch(&MyProc->procLatch);
3378 andres@anarazel.de 445 : 16354 : SwitchToSharedLatch();
446 : :
447 : : /* now that we have a proc, report wait events to shared memory */
1107 448 : 16354 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
449 : :
450 : : /*
451 : : * We might be reusing a semaphore that belonged to a failed process. So
452 : : * be careful and reinitialize its value here. (This is not strictly
453 : : * necessary anymore, but seems like a good idea for cleanliness.)
454 : : */
2680 tgl@sss.pgh.pa.us 455 : 16354 : PGSemaphoreReset(MyProc->sem);
456 : :
457 : : /*
458 : : * Arrange to clean up at backend exit.
459 : : */
8491 460 : 16354 : on_shmem_exit(ProcKill, 0);
461 : :
462 : : /*
463 : : * Now that we have a PGPROC, we could try to acquire locks, so initialize
464 : : * local state needed for LWLocks, and the deadlock checker.
465 : : */
3576 heikki.linnakangas@i 466 : 16354 : InitLWLockAccess();
8480 tgl@sss.pgh.pa.us 467 : 16354 : InitDeadLockChecking();
468 : :
469 : : #ifdef EXEC_BACKEND
470 : :
471 : : /*
472 : : * Initialize backend-local pointers to all the shared data structures.
473 : : * (We couldn't do this until now because it needs LWLocks.)
474 : : */
475 : : if (IsUnderPostmaster)
476 : : AttachSharedMemoryStructs();
477 : : #endif
8491 478 : 16354 : }
479 : :
480 : : /*
481 : : * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
482 : : *
483 : : * This is separate from InitProcess because we can't acquire LWLocks until
484 : : * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
485 : : * work until after we've done AttachSharedMemoryStructs.
486 : : */
487 : : void
6675 488 : 16344 : InitProcessPhase2(void)
489 : : {
490 [ - + ]: 16344 : Assert(MyProc != NULL);
491 : :
492 : : /*
493 : : * Add our PGPROC to the PGPROC array in shared memory.
494 : : */
495 : 16344 : ProcArrayAdd(MyProc);
496 : :
497 : : /*
498 : : * Arrange to clean that up at backend exit.
499 : : */
500 : 16344 : on_shmem_exit(RemoveProcFromArray, 0);
501 : 16344 : }
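Note how the two on_shmem_exit() registrations interact: callbacks run in reverse order of registration, so a backend that called InitProcess() and then InitProcessPhase2() unwinds as

    RemoveProcFromArray()       /* registered second, runs first */
    ProcKill()                  /* registered first, runs last */

which removes the PGPROC from the ProcArray before ProcKill() returns it to a freelist.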
502 : :
503 : : /*
504 : : * InitAuxiliaryProcess -- create a PGPROC entry for an auxiliary process
505 : : *
506 : : * This is called by bgwriter and similar processes so that they will have a
507 : : * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
508 : : * and sema that are assigned are one of the extra ones created during
509 : : * InitProcGlobal.
510 : : *
511 : : * Auxiliary processes are presently not expected to wait for real (lockmgr)
512 : : * locks, so we need not set up the deadlock checker. They are never added
513 : : * to the ProcArray or the sinval messaging mechanism, either. They also
514 : : * don't get a VXID assigned, since this is only useful when we actually
515 : : * hold lockmgr locks.
516 : : *
517 : : * The startup process, however, uses locks but never waits for them in
518 : : * the normal backend sense. It also takes part in sinval messaging as a
519 : : * sendOnly process, so it never reads messages from the sinval queue. So
520 : : * the startup process does have a VXID and does show up in pg_locks.
521 : : */
522 : : void
6248 alvherre@alvh.no-ip. 523 : 3224 : InitAuxiliaryProcess(void)
524 : : {
525 : : PGPROC *auxproc;
526 : : int proctype;
527 : :
528 : : /*
529 : : * ProcGlobal should be set up already (if we are a backend, we inherit
530 : : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
531 : : */
532 [ + - - + ]: 3224 : if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
7570 tgl@sss.pgh.pa.us 533 [ # # ]:UBC 0 : elog(PANIC, "proc header uninitialized");
534 : :
8233 tgl@sss.pgh.pa.us 535 [ - + ]:CBC 3224 : if (MyProc != NULL)
7570 tgl@sss.pgh.pa.us 536 [ # # ]:UBC 0 : elog(ERROR, "you already exist");
537 : :
538 : : /*
539 : : * We use the ProcStructLock to protect assignment and release of
540 : : * AuxiliaryProcs entries.
541 : : *
542 : : * While we are holding the ProcStructLock, also copy the current shared
543 : : * estimate of spins_per_delay to local storage.
544 : : */
6760 tgl@sss.pgh.pa.us 545 [ + + ]:CBC 3224 : SpinLockAcquire(ProcStructLock);
546 : :
547 : 3224 : set_spins_per_delay(ProcGlobal->spins_per_delay);
548 : :
549 : : /*
550 : : * Find a free auxproc ... *big* trouble if there isn't one ...
551 : : */
6248 alvherre@alvh.no-ip. 552 [ + - ]: 7625 : for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
553 : : {
554 : 7625 : auxproc = &AuxiliaryProcs[proctype];
555 [ + + ]: 7625 : if (auxproc->pid == 0)
6675 tgl@sss.pgh.pa.us 556 : 3224 : break;
557 : : }
6248 alvherre@alvh.no-ip. 558 [ - + ]: 3224 : if (proctype >= NUM_AUXILIARY_PROCS)
559 : : {
6760 tgl@sss.pgh.pa.us 560 :UBC 0 : SpinLockRelease(ProcStructLock);
6248 alvherre@alvh.no-ip. 561 [ # # ]: 0 : elog(FATAL, "all AuxiliaryProcs are in use");
562 : : }
563 : :
564 : : /* Mark auxiliary proc as in use by me */
565 : : /* use volatile pointer to prevent code rearrangement */
6248 alvherre@alvh.no-ip. 566 :CBC 3224 : ((volatile PGPROC *) auxproc)->pid = MyProcPid;
567 : :
6760 tgl@sss.pgh.pa.us 568 : 3224 : SpinLockRelease(ProcStructLock);
569 : :
42 heikki.linnakangas@i 570 :GNC 3224 : MyProc = auxproc;
52 571 : 3224 : MyProcNumber = GetNumberFromPGProc(MyProc);
572 : :
573 : : /*
574 : : * Initialize all fields of MyProc, except for those previously
575 : : * initialized by InitProcGlobal.
576 : : */
452 andres@anarazel.de 577 :CBC 3224 : dlist_node_init(&MyProc->links);
1397 peter@eisentraut.org 578 : 3224 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
4154 simon@2ndQuadrant.co 579 : 3224 : MyProc->fpVXIDLock = false;
580 : 3224 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
1339 andres@anarazel.de 581 : 3224 : MyProc->xid = InvalidTransactionId;
1340 582 : 3224 : MyProc->xmin = InvalidTransactionId;
42 heikki.linnakangas@i 583 :GNC 3224 : MyProc->vxid.procNumber = INVALID_PROC_NUMBER;
584 : 3224 : MyProc->vxid.lxid = InvalidLocalTransactionId;
6675 tgl@sss.pgh.pa.us 585 :CBC 3224 : MyProc->databaseId = InvalidOid;
6832 586 : 3224 : MyProc->roleId = InvalidOid;
2071 michael@paquier.xyz 587 : 3224 : MyProc->tempNamespaceId = InvalidOid;
41 heikki.linnakangas@i 588 :GNC 3224 : MyProc->isBackgroundWorker = AmBackgroundWorkerProcess();
737 rhaas@postgresql.org 589 :CBC 3224 : MyProc->delayChkptFlags = 0;
1245 alvherre@alvh.no-ip. 590 : 3224 : MyProc->statusFlags = 0;
511 andres@anarazel.de 591 : 3224 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
4458 heikki.linnakangas@i 592 : 3224 : MyProc->lwWaitMode = 0;
8233 tgl@sss.pgh.pa.us 593 : 3224 : MyProc->waitLock = NULL;
7170 594 : 3224 : MyProc->waitProcLock = NULL;
1147 fujii@postgresql.org 595 : 3224 : pg_atomic_write_u64(&MyProc->waitStart, 0);
596 : : #ifdef USE_ASSERT_CHECKING
597 : : {
598 : : int i;
599 : :
600 : : /* Last process should have released all locks. */
4548 rhaas@postgresql.org 601 [ + + ]: 54808 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
452 andres@anarazel.de 602 [ - + ]: 51584 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
603 : : }
604 : : #endif
605 : :
606 : : /*
607 : : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
608 : : * on it. That allows us to repoint the process latch, which so far
609 : : * points to the process-local one, to the shared one.
610 : : */
4631 tgl@sss.pgh.pa.us 611 : 3224 : OwnLatch(&MyProc->procLatch);
3378 andres@anarazel.de 612 : 3224 : SwitchToSharedLatch();
613 : :
614 : : /* now that we have a proc, report wait events to shared memory */
1107 615 : 3224 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
616 : :
617 : : /* Check that group locking fields are in a proper initial state. */
2989 rhaas@postgresql.org 618 [ - + ]: 3224 : Assert(MyProc->lockGroupLeader == NULL);
619 [ - + ]: 3224 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
620 : :
621 : : /*
622 : : * We might be reusing a semaphore that belonged to a failed process. So
623 : : * be careful and reinitialize its value here. (This is not strictly
624 : : * necessary anymore, but seems like a good idea for cleanliness.)
625 : : */
2680 tgl@sss.pgh.pa.us 626 : 3224 : PGSemaphoreReset(MyProc->sem);
627 : :
628 : : /*
629 : : * Arrange to clean up at process exit.
630 : : */
6248 alvherre@alvh.no-ip. 631 : 3224 : on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
632 : :
633 : : /*
634 : : * Now that we have a PGPROC, we could try to acquire lightweight locks.
635 : : * Initialize local state needed for them. (Heavyweight locks cannot be
636 : : * acquired in aux processes.)
637 : : */
135 heikki.linnakangas@i 638 :GNC 3224 : InitLWLockAccess();
639 : :
640 : : #ifdef EXEC_BACKEND
641 : :
642 : : /*
643 : : * Initialize backend-local pointers to all the shared data structures.
644 : : * (We couldn't do this until now because it needs LWLocks.)
645 : : */
646 : : if (IsUnderPostmaster)
647 : : AttachSharedMemoryStructs();
648 : : #endif
10141 scrappy@hub.org 649 :CBC 3224 : }
650 : :
651 : : /*
652 : : * Used from bufmgr to share the value of the buffer that Startup waits on,
653 : : * or to reset the value to "not waiting" (-1). This allows processing
654 : : * of recovery conflicts for buffer pins. The set is made before backends
655 : : * look at this value, so locking is not required, especially since the
656 : : * set is an atomic integer set operation.
657 : : */
658 : : void
5195 simon@2ndQuadrant.co 659 :GBC 22 : SetStartupBufferPinWaitBufId(int bufid)
660 : : {
661 : : /* use volatile pointer to prevent code rearrangement */
662 : 22 : volatile PROC_HDR *procglobal = ProcGlobal;
663 : :
664 : 22 : procglobal->startupBufferPinWaitBufId = bufid;
665 : 22 : }
666 : :
667 : : /*
668 : : * Used by backends when they receive a request to check for buffer pin waits.
669 : : */
670 : : int
671 : 5 : GetStartupBufferPinWaitBufId(void)
672 : : {
673 : : /* use volatile pointer to prevent code rearrangement */
674 : 5 : volatile PROC_HDR *procglobal = ProcGlobal;
675 : :
4639 tgl@sss.pgh.pa.us 676 : 5 : return procglobal->startupBufferPinWaitBufId;
677 : : }
678 : :
679 : : /*
680 : : * Check whether there are at least N free PGPROC objects. If false is
681 : : * returned, *nfree will be set to the number of free PGPROC objects.
682 : : * Otherwise, *nfree will be set to n.
683 : : *
684 : : * Note: this is designed on the assumption that N will generally be small.
685 : : */
686 : : bool
450 rhaas@postgresql.org 687 :CBC 511 : HaveNFreeProcs(int n, int *nfree)
688 : : {
689 : : dlist_iter iter;
690 : :
691 [ - + ]: 511 : Assert(n > 0);
692 [ - + ]: 511 : Assert(nfree);
693 : :
6876 tgl@sss.pgh.pa.us 694 [ - + ]: 511 : SpinLockAcquire(ProcStructLock);
695 : :
450 rhaas@postgresql.org 696 : 511 : *nfree = 0;
452 andres@anarazel.de 697 [ + - + - ]: 1533 : dlist_foreach(iter, &ProcGlobal->freeProcs)
698 : : {
450 rhaas@postgresql.org 699 : 1533 : (*nfree)++;
700 [ + + ]: 1533 : if (*nfree == n)
452 andres@anarazel.de 701 : 511 : break;
702 : : }
703 : :
6876 tgl@sss.pgh.pa.us 704 : 511 : SpinLockRelease(ProcStructLock);
705 : :
450 rhaas@postgresql.org 706 : 511 : return (*nfree == n);
707 : : }
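A usage sketch with a hypothetical caller and limit (the real callers live in connection startup); because the scan stops as soon as n free entries have been seen, the cost stays O(n) even when the freelist is long:

    int         nfree;

    if (!HaveNFreeProcs(reserved_connections, &nfree))  /* hypothetical GUC */
        ereport(FATAL,
                (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
                 errmsg("remaining connection slots are reserved")));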
708 : :
709 : : /*
710 : : * Check if the current process is awaiting a lock.
711 : : */
712 : : bool
5174 simon@2ndQuadrant.co 713 :GBC 8 : IsWaitingForLock(void)
714 : : {
715 [ + + ]: 8 : if (lockAwaited == NULL)
716 : 6 : return false;
717 : :
718 : 2 : return true;
719 : : }
720 : :
721 : : /*
722 : : * Cancel any pending wait for a lock when aborting a transaction, and revert
723 : : * any strong lock count acquisition for a lock being acquired.
724 : : *
725 : : * (Normally, this would only happen if we accept a cancel/die
726 : : * interrupt while waiting; but an ereport(ERROR) before or during the lock
727 : : * wait is within the realm of possibility, too.)
728 : : */
729 : : void
4379 rhaas@postgresql.org 730 :CBC 460817 : LockErrorCleanup(void)
731 : : {
732 : : LWLock *partitionLock;
733 : : DisableTimeoutParams timeouts[2];
734 : :
3359 heikki.linnakangas@i 735 : 460817 : HOLD_INTERRUPTS();
736 : :
4379 rhaas@postgresql.org 737 : 460817 : AbortStrongLockAcquire();
738 : :
739 : : /* Nothing to do if we weren't waiting for a lock */
6699 tgl@sss.pgh.pa.us 740 [ + + ]: 460817 : if (lockAwaited == NULL)
741 : : {
3359 heikki.linnakangas@i 742 [ - + ]: 460779 : RESUME_INTERRUPTS();
5923 tgl@sss.pgh.pa.us 743 : 460779 : return;
744 : : }
745 : :
746 : : /*
747 : : * Turn off the deadlock and lock timeout timers, if they are still
748 : : * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
749 : : * indicator flag, since this function is executed before
750 : : * ProcessInterrupts when responding to SIGINT; else we'd lose the
751 : : * knowledge that the SIGINT came from a lock timeout and not an external
752 : : * source.
753 : : */
4047 754 : 38 : timeouts[0].id = DEADLOCK_TIMEOUT;
755 : 38 : timeouts[0].keep_indicator = false;
756 : 38 : timeouts[1].id = LOCK_TIMEOUT;
757 : 38 : timeouts[1].keep_indicator = true;
758 : 38 : disable_timeouts(timeouts, 2);
759 : :
760 : : /* Unlink myself from the wait queue, if on it (might not be anymore!) */
6475 761 : 38 : partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
6699 762 : 38 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
763 : :
452 andres@anarazel.de 764 [ + + ]: 38 : if (!dlist_node_is_detached(&MyProc->links))
765 : : {
766 : : /* We could not have been granted the lock yet */
6475 tgl@sss.pgh.pa.us 767 : 37 : RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
768 : : }
769 : : else
770 : : {
771 : : /*
772 : : * Somebody kicked us off the lock queue already. Perhaps they
773 : : * granted us the lock, or perhaps they detected a deadlock. If they
774 : : * did grant us the lock, we'd better remember it in our local lock
775 : : * table.
776 : : */
1397 peter@eisentraut.org 777 [ + - ]: 1 : if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
7170 tgl@sss.pgh.pa.us 778 : 1 : GrantAwaitedLock();
779 : : }
780 : :
6699 781 : 38 : lockAwaited = NULL;
782 : :
783 : 38 : LWLockRelease(partitionLock);
784 : :
3359 heikki.linnakangas@i 785 [ - + ]: 38 : RESUME_INTERRUPTS();
786 : : }
787 : :
788 : :
789 : : /*
790 : : * ProcReleaseLocks() -- release locks associated with current transaction
791 : : * at main transaction commit or abort
792 : : *
793 : : * At main transaction commit, we release standard locks except session locks.
794 : : * At main transaction abort, we release all locks including session locks.
795 : : *
796 : : * Advisory locks are released only if they are transaction-level;
797 : : * session-level holds remain, whether this is a commit or not.
798 : : *
799 : : * At subtransaction commit, we don't release any locks (so this func is not
800 : : * needed at all); we will defer the releasing to the parent transaction.
801 : : * At subtransaction abort, we release all locks held by the subtransaction;
802 : : * this is implemented by retail releasing of the locks under control of
803 : : * the ResourceOwner mechanism.
804 : : */
805 : : void
7211 tgl@sss.pgh.pa.us 806 : 432915 : ProcReleaseLocks(bool isCommit)
807 : : {
9716 bruce@momjian.us 808 [ - + ]: 432915 : if (!MyProc)
9716 bruce@momjian.us 809 :UBC 0 : return;
810 : : /* If waiting, get off wait queue (should only be needed after error) */
4379 rhaas@postgresql.org 811 :CBC 432915 : LockErrorCleanup();
812 : : /* Release standard locks, including session-level if aborting */
7170 tgl@sss.pgh.pa.us 813 : 432915 : LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
814 : : /* Release transaction-level advisory locks */
4804 itagaki.takahiro@gma 815 : 432915 : LockReleaseAll(USER_LOCKMETHOD, false);
816 : : }
817 : :
818 : :
819 : : /*
820 : : * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
821 : : */
822 : : static void
6675 tgl@sss.pgh.pa.us 823 : 15787 : RemoveProcFromArray(int code, Datum arg)
824 : : {
825 [ - + ]: 15787 : Assert(MyProc != NULL);
6063 826 : 15787 : ProcArrayRemove(MyProc, InvalidTransactionId);
6675 827 : 15787 : }
828 : :
829 : : /*
830 : : * ProcKill() -- Destroy the per-proc data structure for
831 : : * this process. Release any of its held LW locks.
832 : : */
833 : : static void
7429 peter_e@gmx.net 834 : 15796 : ProcKill(int code, Datum arg)
835 : : {
836 : : PGPROC *proc;
837 : : dlist_head *procgloballist;
838 : :
8255 tgl@sss.pgh.pa.us 839 [ - + ]: 15796 : Assert(MyProc != NULL);
840 : :
841 : : /* not safe if forked by system(), etc. */
180 nathan@postgresql.or 842 [ - + ]: 15796 : if (MyProc->pid != (int) getpid())
180 nathan@postgresql.or 843 [ # # ]:UBC 0 : elog(PANIC, "ProcKill() called in child process");
844 : :
845 : : /* Make sure we're out of the sync rep lists */
4631 tgl@sss.pgh.pa.us 846 :CBC 15796 : SyncRepCleanupAtProcExit();
847 : :
848 : : #ifdef USE_ASSERT_CHECKING
849 : : {
850 : : int i;
851 : :
852 : : /* Last process should have released all locks. */
4548 rhaas@postgresql.org 853 [ + + ]: 268532 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
452 andres@anarazel.de 854 [ - + ]: 252736 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
855 : : }
856 : : #endif
857 : :
858 : : /*
859 : : * Release any LW locks I am holding. There really shouldn't be any, but
860 : : * it's cheap to check again before we cut the knees off the LWLock
861 : : * facility by releasing our PGPROC ...
862 : : */
6824 tgl@sss.pgh.pa.us 863 : 15796 : LWLockReleaseAll();
864 : :
865 : : /* Cancel any pending condition variable sleep, too */
2700 rhaas@postgresql.org 866 : 15796 : ConditionVariableCancelSleep();
867 : :
868 : : /*
869 : : * Detach from any lock group of which we are a member. If the leader
870 : : * exits before all other group members, its PGPROC will remain allocated
871 : : * until the last group process exits; that process must return the
872 : : * leader's PGPROC to the appropriate list.
873 : : */
2989 874 [ + + ]: 15796 : if (MyProc->lockGroupLeader != NULL)
875 : : {
876 : 1381 : PGPROC *leader = MyProc->lockGroupLeader;
877 : 1381 : LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
878 : :
879 : 1381 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
880 [ - + ]: 1381 : Assert(!dlist_is_empty(&leader->lockGroupMembers));
881 : 1381 : dlist_delete(&MyProc->lockGroupLink);
882 [ + + ]: 1381 : if (dlist_is_empty(&leader->lockGroupMembers))
883 : : {
884 : 59 : leader->lockGroupLeader = NULL;
885 [ - + ]: 59 : if (leader != MyProc)
886 : : {
2989 rhaas@postgresql.org 887 :UBC 0 : procgloballist = leader->procgloballist;
888 : :
889 : : /* Leader exited first; return its PGPROC. */
890 [ # # ]: 0 : SpinLockAcquire(ProcStructLock);
452 andres@anarazel.de 891 : 0 : dlist_push_head(procgloballist, &leader->links);
2989 rhaas@postgresql.org 892 : 0 : SpinLockRelease(ProcStructLock);
893 : : }
894 : : }
2989 rhaas@postgresql.org 895 [ + - ]:CBC 1322 : else if (leader != MyProc)
896 : 1322 : MyProc->lockGroupLeader = NULL;
897 : 1381 : LWLockRelease(leader_lwlock);
898 : : }
899 : :
900 : : /*
901 : : * Reset MyLatch to the process-local one. This is so that signal
902 : : * handlers et al can continue using the latch after the shared latch
903 : : * isn't ours anymore.
904 : : *
905 : : * Similarly, stop reporting wait events to MyProc->wait_event_info.
906 : : *
907 : : * After that clear MyProc and disown the shared latch.
908 : : */
3378 andres@anarazel.de 909 : 15796 : SwitchBackToLocalLatch();
1107 910 : 15796 : pgstat_reset_wait_event_storage();
911 : :
3726 rhaas@postgresql.org 912 : 15796 : proc = MyProc;
913 : 15796 : MyProc = NULL;
42 heikki.linnakangas@i 914 :GNC 15796 : MyProcNumber = INVALID_PROC_NUMBER;
3726 rhaas@postgresql.org 915 :CBC 15796 : DisownLatch(&proc->procLatch);
916 : :
917 : : /* Mark the proc no longer in use */
42 heikki.linnakangas@i 918 :GNC 15796 : proc->pid = 0;
919 : 15796 : proc->vxid.procNumber = INVALID_PROC_NUMBER;
920 : 15796 : proc->vxid.lxid = InvalidTransactionId;
921 : :
3183 rhaas@postgresql.org 922 :CBC 15796 : procgloballist = proc->procgloballist;
8233 tgl@sss.pgh.pa.us 923 [ + + ]: 15796 : SpinLockAcquire(ProcStructLock);
924 : :
925 : : /*
926 : : * If we're still a member of a locking group, that means we're a leader
927 : : * which has somehow exited before its children. The last remaining child
928 : : * will release our PGPROC. Otherwise, release it now.
929 : : */
2989 rhaas@postgresql.org 930 [ + - ]: 15796 : if (proc->lockGroupLeader == NULL)
931 : : {
932 : : /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
933 [ - + ]: 15796 : Assert(dlist_is_empty(&proc->lockGroupMembers));
934 : :
935 : : /* Return PGPROC structure (and semaphore) to appropriate freelist */
452 andres@anarazel.de 936 : 15796 : dlist_push_tail(procgloballist, &proc->links);
937 : : }
938 : :
939 : : /* Update shared estimate of spins_per_delay */
3103 rhaas@postgresql.org 940 : 15796 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
941 : :
8233 tgl@sss.pgh.pa.us 942 : 15796 : SpinLockRelease(ProcStructLock);
943 : :
944 : : /*
945 : : * This process is no longer present in shared memory in any meaningful
946 : : * way, so tell the postmaster we've cleaned up acceptably well. (XXX
947 : : * autovac launcher should be included here someday)
948 : : *
949 : : * Slot sync worker is also not a postmaster child, so skip this
950 : : * shared-memory-related processing here.
951 : : */
41 heikki.linnakangas@i 952 [ + + + + ]:GNC 15796 : if (IsUnderPostmaster && !AmAutoVacuumLauncherProcess() &&
953 [ + + ]: 14062 : !AmLogicalSlotSyncWorkerProcess())
5458 tgl@sss.pgh.pa.us 954 :CBC 14058 : MarkPostmasterChildInactive();
955 : :
956 : : /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
6208 alvherre@alvh.no-ip. 957 [ + + ]: 15796 : if (AutovacuumLauncherPid != 0)
5340 tgl@sss.pgh.pa.us 958 : 26 : kill(AutovacuumLauncherPid, SIGUSR2);
8233 959 : 15796 : }
960 : :
961 : : /*
962 : : * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
963 : : * processes (bgwriter, etc). The PGPROC and sema are not released, only
964 : : * marked as not-in-use.
965 : : */
966 : : static void
6248 alvherre@alvh.no-ip. 967 : 2252 : AuxiliaryProcKill(int code, Datum arg)
968 : : {
7168 bruce@momjian.us 969 : 2252 : int proctype = DatumGetInt32(arg);
970 : : PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
971 : : PGPROC *proc;
972 : :
6248 alvherre@alvh.no-ip. 973 [ + - - + ]: 2252 : Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
974 : :
975 : : /* not safe if forked by system(), etc. */
180 nathan@postgresql.or 976 [ - + ]: 2252 : if (MyProc->pid != (int) getpid())
180 nathan@postgresql.or 977 [ # # ]:UBC 0 : elog(PANIC, "AuxiliaryProcKill() called in child process");
978 : :
6248 alvherre@alvh.no-ip. 979 :CBC 2252 : auxproc = &AuxiliaryProcs[proctype];
980 : :
981 [ - + ]: 2252 : Assert(MyProc == auxproc);
982 : :
983 : : /* Release any LW locks I am holding (see notes above) */
8233 tgl@sss.pgh.pa.us 984 : 2252 : LWLockReleaseAll();
985 : :
986 : : /* Cancel any pending condition variable sleep, too */
2700 rhaas@postgresql.org 987 : 2252 : ConditionVariableCancelSleep();
988 : :
989 : : /* look at the equivalent ProcKill() code for comments */
3378 andres@anarazel.de 990 : 2252 : SwitchBackToLocalLatch();
1107 991 : 2252 : pgstat_reset_wait_event_storage();
992 : :
3726 rhaas@postgresql.org 993 : 2252 : proc = MyProc;
994 : 2252 : MyProc = NULL;
42 heikki.linnakangas@i 995 :GNC 2252 : MyProcNumber = INVALID_PROC_NUMBER;
3726 rhaas@postgresql.org 996 :CBC 2252 : DisownLatch(&proc->procLatch);
997 : :
6760 tgl@sss.pgh.pa.us 998 [ + + ]: 2252 : SpinLockAcquire(ProcStructLock);
999 : :
1000 : : /* Mark auxiliary proc no longer in use */
3726 rhaas@postgresql.org 1001 : 2252 : proc->pid = 0;
42 heikki.linnakangas@i 1002 :GNC 2252 : proc->vxid.procNumber = INVALID_PROC_NUMBER;
1003 : 2252 : proc->vxid.lxid = InvalidTransactionId;
1004 : :
1005 : : /* Update shared estimate of spins_per_delay */
6760 tgl@sss.pgh.pa.us 1006 :CBC 2252 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
1007 : :
1008 : 2252 : SpinLockRelease(ProcStructLock);
10141 scrappy@hub.org 1009 : 2252 : }
1010 : :
1011 : : /*
1012 : : * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
1013 : : * given its PID
1014 : : *
1015 : : * Returns NULL if not found.
1016 : : */
1017 : : PGPROC *
2576 rhaas@postgresql.org 1018 : 2303 : AuxiliaryPidGetProc(int pid)
1019 : : {
1020 : 2303 : PGPROC *result = NULL;
1021 : : int index;
1022 : :
1023 [ - + ]: 2303 : if (pid == 0) /* never match dummy PGPROCs */
2576 rhaas@postgresql.org 1024 :UBC 0 : return NULL;
1025 : :
2576 rhaas@postgresql.org 1026 [ + - ]:CBC 4729 : for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
1027 : : {
1028 : 4729 : PGPROC *proc = &AuxiliaryProcs[index];
1029 : :
1030 [ + + ]: 4729 : if (proc->pid == pid)
1031 : : {
1032 : 2303 : result = proc;
1033 : 2303 : break;
1034 : : }
1035 : : }
1036 : 2303 : return result;
1037 : : }
1038 : :
1039 : :
1040 : : /*
1041 : : * ProcSleep -- put a process to sleep on the specified lock
1042 : : *
1043 : : * Caller must have set MyProc->heldLocks to reflect locks already held
1044 : : * on the lockable object by this process (under all XIDs).
1045 : : *
1046 : : * It's not actually guaranteed that we need to wait when this function is
1047 : : * called, because it could be that when we try to find a position at which
1048 : : * to insert ourself into the wait queue, we discover that we must be inserted
1049 : : * ahead of everyone who wants a lock that conflicts with ours. In that case,
1050 : : * we get the lock immediately. Because of this, it's sensible for this function
1051 : : * to have a dontWait argument, despite the name.
1052 : : *
1053 : : * The lock table's partition lock must be held at entry, and will be held
1054 : : * at exit.
1055 : : *
1056 : : * Result: PROC_WAIT_STATUS_OK if we acquired the lock, PROC_WAIT_STATUS_ERROR
1057 : : * if not (if dontWait = false, this is a deadlock; if dontWait = true, we
1058 : : * would have had to wait).
1059 : : *
1060 : : * ASSUME: that no one will fiddle with the queue until after
1061 : : * we release the partition lock.
1062 : : *
1063 : : * NOTES: The process queue is now a priority queue for locking.
1064 : : */
1065 : : ProcWaitStatus
31 rhaas@postgresql.org 1066 :GNC 1762 : ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
1067 : : {
6699 tgl@sss.pgh.pa.us 1068 :CBC 1762 : LOCKMODE lockmode = locallock->tag.mode;
1069 : 1762 : LOCK *lock = locallock->lock;
1070 : 1762 : PROCLOCK *proclock = locallock->proclock;
6475 1071 : 1762 : uint32 hashcode = locallock->hashcode;
3730 rhaas@postgresql.org 1072 : 1762 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
452 andres@anarazel.de 1073 : 1762 : dclist_head *waitQueue = &lock->waitProcs;
331 tgl@sss.pgh.pa.us 1074 : 1762 : PGPROC *insert_before = NULL;
7440 bruce@momjian.us 1075 : 1762 : LOCKMASK myHeldLocks = MyProc->heldLocks;
1192 fujii@postgresql.org 1076 : 1762 : TimestampTz standbyWaitStart = 0;
8258 tgl@sss.pgh.pa.us 1077 : 1762 : bool early_deadlock = false;
5995 bruce@momjian.us 1078 : 1762 : bool allow_autovacuum_cancel = true;
1192 fujii@postgresql.org 1079 : 1762 : bool logged_recovery_conflict = false;
1080 : : ProcWaitStatus myWaitStatus;
2989 rhaas@postgresql.org 1081 : 1762 : PGPROC *leader = MyProc->lockGroupLeader;
1082 : :
1083 : : /*
1084 : : * If group locking is in use, locks held by members of my locking group
1085 : : * need to be included in myHeldLocks. This is not required for relation
1086 : : * extension lock which conflict among group members. However, including
1087 : : * extension lock, which conflicts among group members. However, including
1088 : : * locks as compared to other backends which are also trying to acquire
1089 : : * those locks. OTOH, we can avoid giving priority to group members for
1090 : : * that kind of lock, but there doesn't appear to be a clear advantage in
1091 : : * doing so.
1092 : : */
1093 [ + + ]: 1762 : if (leader != NULL)
1094 : : {
1095 : : dlist_iter iter;
1096 : :
452 andres@anarazel.de 1097 [ + - + + ]: 30 : dlist_foreach(iter, &lock->procLocks)
1098 : : {
1099 : : PROCLOCK *otherproclock;
1100 : :
1101 : 23 : otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
1102 : :
2989 rhaas@postgresql.org 1103 [ + + ]: 23 : if (otherproclock->groupLeader == leader)
1104 : 11 : myHeldLocks |= otherproclock->holdMask;
1105 : : }
1106 : : }
1107 : :
1108 : : /*
1109 : : * Determine where to add myself in the wait queue.
1110 : : *
1111 : : * Normally I should go at the end of the queue. However, if I already
1112 : : * hold locks that conflict with the request of any previous waiter, put
1113 : : * myself in the queue just in front of the first such waiter. This is not
1114 : : * a necessary step, since deadlock detection would move me to before that
1115 : : * waiter anyway; but it's relatively cheap to detect such a conflict
1116 : : * immediately, and avoid delaying till deadlock timeout.
1117 : : *
1118 : : * Special case: if I find I should go in front of some waiter, check to
1119 : : * see if I conflict with already-held locks or the requests before that
1120 : : * waiter. If not, then just grant myself the requested lock immediately.
1121 : : * This is the same as the test for immediate grant in LockAcquire, except
1122 : : * we are only considering the part of the wait queue before my insertion
1123 : : * point.
1124 : : */
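    /*
     * Worked example (hypothetical): we hold RowExclusiveLock and now request
     * ShareLock. The queue head W1 waits for AccessExclusiveLock, which
     * conflicts with our held RowExclusiveLock, so W1 would have to wait for
     * us and we belong in front of W1. If, additionally, ShareLock conflicts
     * neither with already-granted locks nor with requests ahead of W1, we
     * skip the queue and take the lock immediately.
     */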
452 andres@anarazel.de 1125 [ + + + + ]: 1762 : if (myHeldLocks != 0 && !dclist_is_empty(waitQueue))
1126 : : {
7440 bruce@momjian.us 1127 : 7 : LOCKMASK aheadRequests = 0;
1128 : : dlist_iter iter;
1129 : :
452 andres@anarazel.de 1130 [ + - + - ]: 7 : dclist_foreach(iter, waitQueue)
1131 : : {
1132 : 7 : PGPROC *proc = dlist_container(PGPROC, links, iter.cur);
1133 : :
1134 : : /*
1135 : : * If we're part of the same locking group as this waiter, its
1136 : : * locks neither conflict with ours nor contribute to
1137 : : * aheadRequests.
1138 : : */
2989 rhaas@postgresql.org 1139 [ - + - - ]: 7 : if (leader != NULL && leader == proc->lockGroupLeader)
2989 rhaas@postgresql.org 1140 :UBC 0 : continue;
1141 : :
1142 : : /* Must he wait for me? */
7941 bruce@momjian.us 1143 [ + - ]:CBC 7 : if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1144 : : {
1145 : : /* Must I wait for him? */
1146 [ + + ]: 7 : if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1147 : : {
1148 : : /*
1149 : : * Yes, so we have a deadlock. Easiest way to clean up
1150 : : * correctly is to call RemoveFromWaitQueue(), but we
1151 : : * a flag to check below, and break out of the loop. Also,
1152 : : * a flag to check below, and break out of loop. Also,
1153 : : * record deadlock info for later message.
1154 : : */
7759 tgl@sss.pgh.pa.us 1155 : 1 : RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
8258 1156 : 1 : early_deadlock = true;
1157 : 1 : break;
1158 : : }
1159 : : /* I must go before this waiter. Check special case. */
7941 bruce@momjian.us 1160 [ + - ]: 6 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1568 peter@eisentraut.org 1161 [ + - ]: 6 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1162 : : proclock))
1163 : : {
1164 : : /* Skip the wait and just grant myself the lock. */
7726 bruce@momjian.us 1165 : 6 : GrantLock(lock, proclock, lockmode);
7170 tgl@sss.pgh.pa.us 1166 : 6 : GrantAwaitedLock();
1397 peter@eisentraut.org 1167 : 6 : return PROC_WAIT_STATUS_OK;
1168 : : }
1169 : :
1170 : : /* Put myself into wait queue before conflicting process */
452 andres@anarazel.de 1171 :UBC 0 : insert_before = proc;
9109 vadim4o@yahoo.com 1172 : 0 : break;
1173 : : }
1174 : : /* Nope, so advance to next waiter */
7440 bruce@momjian.us 1175 : 0 : aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1176 : : }
1177 : : }
1178 : :
1179 : : /*
1180 : : * At this point we know that we'd really need to sleep. If we've been
1181 : : * commanded not to do that, bail out.
1182 : : */
31 rhaas@postgresql.org 1183 [ + + ]:GNC 1756 : if (dontWait)
1184 : 652 : return PROC_WAIT_STATUS_ERROR;
1185 : :
1186 : : /*
1187 : : * Insert self into queue, at the position determined above.
1188 : : */
452 andres@anarazel.de 1189 [ - + ]:CBC 1104 : if (insert_before)
452 andres@anarazel.de 1190 :UBC 0 : dclist_insert_before(waitQueue, &insert_before->links, &MyProc->links);
1191 : : else
452 andres@anarazel.de 1192 :CBC 1104 : dclist_push_tail(waitQueue, &MyProc->links);
1193 : :
7440 bruce@momjian.us 1194 : 1104 : lock->waitMask |= LOCKBIT_ON(lockmode);
1195 : :
1196 : : /* Set up wait information in PGPROC object, too */
8483 tgl@sss.pgh.pa.us 1197 : 1104 : MyProc->waitLock = lock;
7170 1198 : 1104 : MyProc->waitProcLock = proclock;
8483 1199 : 1104 : MyProc->waitLockMode = lockmode;
1200 : :
1397 peter@eisentraut.org 1201 : 1104 : MyProc->waitStatus = PROC_WAIT_STATUS_WAITING;
1202 : :
1203 : : /*
1204 : : * If we detected deadlock, give up without waiting. This must agree with
1205 : : * CheckDeadLock's recovery code.
1206 : : */
8258 tgl@sss.pgh.pa.us 1207 [ + + ]: 1104 : if (early_deadlock)
1208 : : {
6475 1209 : 1 : RemoveFromWaitQueue(MyProc, hashcode);
1397 peter@eisentraut.org 1210 : 1 : return PROC_WAIT_STATUS_ERROR;
1211 : : }
1212 : :
1213 : : /* mark that we are waiting for a lock */
6699 tgl@sss.pgh.pa.us 1214 : 1103 : lockAwaited = locallock;
1215 : :
1216 : : /*
1217 : : * Release the lock table's partition lock.
1218 : : *
1219 : : * NOTE: this may also cause us to exit critical-section state, possibly
1220 : : * allowing a cancel/die interrupt to be accepted. This is OK because we
1221 : : * have recorded the fact that we are waiting for a lock, and so
1222 : : * LockErrorCleanup will clean up if cancel/die happens.
1223 : : */
1224 : 1103 : LWLockRelease(partitionLock);
1225 : :
1226 : : /*
1227 : : * Also, now that we will successfully clean up after an ereport, it's
1228 : : * safe to check to see if there's a buffer pin deadlock against the
1229 : : * Startup process. Of course, that's only necessary if we're doing Hot
1230 : : * Standby and are not the Startup process ourselves.
1231 : : */
4639 1232 [ + + + + ]: 1103 : if (RecoveryInProgress() && !InRecovery)
4639 tgl@sss.pgh.pa.us 1233 :GBC 1 : CheckRecoveryConflictDeadlock();
1234 : :
1235 : : /* Reset deadlock_state before enabling the timeout handler */
6144 tgl@sss.pgh.pa.us 1236 :CBC 1103 : deadlock_state = DS_NOT_YET_CHECKED;
3358 andres@anarazel.de 1237 : 1103 : got_deadlock_timeout = false;
1238 : :
1239 : : /*
1240 : : * Set timer so we can wake up after a while and check for a deadlock. If a
1241 : : * deadlock is detected, the handler sets MyProc->waitStatus =
1242 : : * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1243 : : * rather than success.
1244 : : *
1245 : : * By delaying the check until we've waited for a bit, we can avoid
1246 : : * running the rather expensive deadlock-check code in most cases.
1247 : : *
1248 : : * If LockTimeout is set, also enable the timeout for that. We can save a
1249 : : * few cycles by enabling both timeout sources in one call.
1250 : : *
1251 : : * If InHotStandby we set lock waits slightly later for clarity with other
1252 : : * code.
1253 : : */
2957 simon@2ndQuadrant.co 1254 [ + + ]: 1103 : if (!InHotStandby)
1255 : : {
1256 [ + + ]: 1102 : if (LockTimeout > 0)
1257 : : {
1258 : : EnableTimeoutParams timeouts[2];
1259 : :
1260 : 115 : timeouts[0].id = DEADLOCK_TIMEOUT;
1261 : 115 : timeouts[0].type = TMPARAM_AFTER;
1262 : 115 : timeouts[0].delay_ms = DeadlockTimeout;
1263 : 115 : timeouts[1].id = LOCK_TIMEOUT;
1264 : 115 : timeouts[1].type = TMPARAM_AFTER;
1265 : 115 : timeouts[1].delay_ms = LockTimeout;
1266 : 115 : enable_timeouts(timeouts, 2);
1267 : : }
1268 : : else
1269 : 987 : enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
1270 : :
1271 : : /*
1272 : : * Use the current time obtained for the deadlock timeout timer as
1273 : : * waitStart (i.e., the time when this process started waiting for the
1274 : : * lock). Since fetching the current time again would add overhead, we
1275 : : * reuse the timestamp already obtained for the timer.
1276 : : *
1277 : : * Note that waitStart is updated without holding the lock table's
1278 : : * partition lock, to avoid the overhead of an additional lock
1279 : : * acquisition. This can leave "waitstart" in pg_locks NULL for a very
1280 : : * short period after the wait has started, even though "granted" is
1281 : : * false. This is acceptable in practice, because users typically
1282 : : * consult "waitstart" only after a lock wait has lasted a long
1283 : : * time.
1284 : : */
1154 fujii@postgresql.org 1285 : 1102 : pg_atomic_write_u64(&MyProc->waitStart,
1286 : 1102 : get_timeout_start_time(DEADLOCK_TIMEOUT));
1287 : : }
1192 fujii@postgresql.org 1288 [ + - ]:GBC 1 : else if (log_recovery_conflict_waits)
1289 : : {
1290 : : /*
1291 : : * Set the wait start timestamp if logging is enabled and in hot
1292 : : * standby.
1293 : : */
1294 : 1 : standbyWaitStart = GetCurrentTimestamp();
1295 : : }
1296 : :
1297 : : /*
1298 : : * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1299 : : * will already be set and WaitLatch will return immediately. But a set
1300 : : * latch does not necessarily mean that the lock is free now: latches
1301 : : * are set from many sources other than somebody releasing the lock.
1302 : : *
1303 : : * We process interrupts whenever the latch has been set, so cancel/die
1304 : : * interrupts are processed quickly. This means we must not mind losing
1305 : : * control to a cancel/die interrupt here. We don't, because we have no
1306 : : * shared-state-change work to do after being granted the lock (the
1307 : : * grantor did it all). We do have to worry about canceling the deadlock
1308 : : * timeout and updating the locallock table, but if we lose control to an
1309 : : * error, LockErrorCleanup will fix that up.
1310 : : */
1311 : : do
1312 : : {
2957 simon@2ndQuadrant.co 1313 [ + + ]:CBC 3368 : if (InHotStandby)
1314 : : {
1192 fujii@postgresql.org 1315 :GBC 4 : bool maybe_log_conflict =
331 tgl@sss.pgh.pa.us 1316 [ + - + + ]: 4 : (standbyWaitStart != 0 && !logged_recovery_conflict);
1317 : :
1318 : : /* Set a timer and wait for that or for the lock to be granted */
1192 fujii@postgresql.org 1319 : 4 : ResolveRecoveryConflictWithLock(locallock->tag.lock,
1320 : : maybe_log_conflict);
1321 : :
1322 : : /*
1323 : : * Emit the log message if the startup process is waiting longer
1324 : : * than deadlock_timeout because of a recovery conflict on a lock.
1325 : : */
1326 [ + + ]: 4 : if (maybe_log_conflict)
1327 : : {
1328 : 2 : TimestampTz now = GetCurrentTimestamp();
1329 : :
1330 [ + + ]: 2 : if (TimestampDifferenceExceeds(standbyWaitStart, now,
1331 : : DeadlockTimeout))
1332 : : {
1333 : : VirtualTransactionId *vxids;
1334 : : int cnt;
1335 : :
1336 : 1 : vxids = GetLockConflicts(&locallock->tag.lock,
1337 : : AccessExclusiveLock, &cnt);
1338 : :
1339 : : /*
1340 : : * Log the recovery conflict and the list of PIDs of
1341 : : * backends holding the conflicting lock. Note that we log
1342 : : * even if there are no such backends right now, because
1343 : : * the startup process has already waited longer than
1344 : : * deadlock_timeout.
1345 : : */
1192 fujii@postgresql.org 1346 :UBC 0 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1347 : : standbyWaitStart, now,
1187 fujii@postgresql.org 1348 [ + - ]:GBC 1 : cnt > 0 ? vxids : NULL, true);
1192 1349 : 1 : logged_recovery_conflict = true;
1350 : : }
1351 : : }
1352 : : }
1353 : : else
1354 : : {
1969 tmunro@postgresql.or 1355 :CBC 3364 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1356 : 3364 : PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
2957 simon@2ndQuadrant.co 1357 : 3364 : ResetLatch(MyLatch);
1358 : : /* check for deadlocks first, as that's probably log-worthy */
1359 [ + + ]: 3364 : if (got_deadlock_timeout)
1360 : : {
1361 : 34 : CheckDeadLock();
1362 : 34 : got_deadlock_timeout = false;
1363 : : }
1364 [ + + ]: 3364 : CHECK_FOR_INTERRUPTS();
1365 : : }
1366 : :
1367 : : /*
1368 : : * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1369 : : * else asynchronously. Read it just once per loop to prevent
1370 : : * surprising behavior (such as missing log messages).
1371 : : */
1397 peter@eisentraut.org 1372 : 3329 : myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1373 : :
1374 : : /*
1375 : : * If we are not deadlocked, but are waiting on an autovacuum-induced
1376 : : * task, send a signal to interrupt it.
1377 : : */
6015 alvherre@alvh.no-ip. 1378 [ - + - - ]: 3329 : if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1379 : : {
5995 bruce@momjian.us 1380 :LBC (1) : PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1381 : : uint8 statusFlags;
1382 : : uint8 lockmethod_copy;
1383 : : LOCKTAG locktag_copy;
1384 : :
1385 : : /*
1386 : : * Grab info we need, then release lock immediately. Note this
1387 : : * coding means that there is a tiny chance that the process
1388 : : * terminates its current transaction and starts a different one
1389 : : * before we have a chance to send the signal; the worst possible
1390 : : * consequence is that a for-wraparound vacuum is canceled. But
1391 : : * that could happen in any case unless we were to do kill() with
1392 : : * the lock held, which is much more undesirable.
1393 : : */
6015 alvherre@alvh.no-ip. 1394 : (1) : LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1238 1395 : (1) : statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1396 : (1) : lockmethod_copy = lock->tag.locktag_lockmethodid;
1397 : (1) : locktag_copy = lock->tag;
1398 : (1) : LWLockRelease(ProcArrayLock);
1399 : :
1400 : : /*
1401 : : * Only do it if the worker is not working to protect against Xid
1402 : : * wraparound.
1403 : : */
1245 1404 [ # # ]: (1) : if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1405 [ # # ]: (1) : !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1406 : : {
5995 bruce@momjian.us 1407 : (1) : int pid = autovac->pid;
1408 : :
1409 : : /* report the case, if configured to do so */
1238 tgl@sss.pgh.pa.us 1410 [ # # ]: (1) : if (message_level_is_interesting(DEBUG1))
1411 : : {
1412 : : StringInfoData locktagbuf;
1413 : : StringInfoData logbuf; /* errdetail for server log */
1414 : :
1238 tgl@sss.pgh.pa.us 1415 :UBC 0 : initStringInfo(&locktagbuf);
1416 : 0 : initStringInfo(&logbuf);
1417 : 0 : DescribeLockTag(&locktagbuf, &locktag_copy);
1418 : 0 : appendStringInfo(&logbuf,
1419 : : "Process %d waits for %s on %s.",
1420 : : MyProcPid,
1421 : : GetLockmodeName(lockmethod_copy, lockmode),
1422 : : locktagbuf.data);
1423 : :
1424 [ # # ]: 0 : ereport(DEBUG1,
1425 : : (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1426 : : pid),
1427 : : errdetail_log("%s", logbuf.data)));
1428 : :
1429 : 0 : pfree(locktagbuf.data);
1430 : 0 : pfree(logbuf.data);
1431 : : }
1432 : :
1433 : : /* send the autovacuum worker Back to Old Kent Road */
6015 alvherre@alvh.no-ip. 1434 [ # # ]:LBC (1) : if (kill(pid, SIGINT) < 0)
1435 : : {
1436 : : /*
1437 : : * There's a race condition here: once we release the
1438 : : * ProcArrayLock, it's possible for the autovac worker to
1439 : : * close up shop and exit before we can do the kill().
1440 : : * Therefore, we do not whinge about no-such-process.
1441 : : * Other errors such as EPERM could conceivably happen if
1442 : : * the kernel recycles the PID fast enough, but such cases
1443 : : * seem improbable enough that it's probably best to issue
1444 : : * a warning if we see some other errno.
1445 : : */
3183 tgl@sss.pgh.pa.us 1446 [ # # ]:UBC 0 : if (errno != ESRCH)
1447 [ # # ]: 0 : ereport(WARNING,
1448 : : (errmsg("could not send signal to process %d: %m",
1449 : : pid)));
1450 : : }
1451 : : }
1452 : :
1453 : : /* prevent the signal from being sent more than once */
6015 alvherre@alvh.no-ip. 1454 :LBC (1) : allow_autovacuum_cancel = false;
1455 : : }
1456 : :
1457 : : /*
1458 : : * If awoken after the deadlock check interrupt has run, and
1459 : : * log_lock_waits is on, then report about the wait.
1460 : : */
6074 tgl@sss.pgh.pa.us 1461 [ + + + + ]:CBC 3329 : if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1462 : : {
1463 : : StringInfoData buf,
1464 : : lock_waiters_sbuf,
1465 : : lock_holders_sbuf;
1466 : : const char *modename;
1467 : : long secs;
1468 : : int usecs;
1469 : : long msecs;
1470 : : dlist_iter proc_iter;
1471 : : PROCLOCK *curproclock;
3685 fujii@postgresql.org 1472 : 18 : bool first_holder = true,
1473 : 18 : first_waiter = true;
1474 : 18 : int lockHoldersNum = 0;
1475 : :
6074 tgl@sss.pgh.pa.us 1476 : 18 : initStringInfo(&buf);
3685 fujii@postgresql.org 1477 : 18 : initStringInfo(&lock_waiters_sbuf);
1478 : 18 : initStringInfo(&lock_holders_sbuf);
1479 : :
6074 tgl@sss.pgh.pa.us 1480 : 18 : DescribeLockTag(&buf, &locallock->tag.lock);
1481 : 18 : modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1482 : : lockmode);
4290 alvherre@alvh.no-ip. 1483 : 18 : TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
1484 : : GetCurrentTimestamp(),
1485 : : &secs, &usecs);
6074 tgl@sss.pgh.pa.us 1486 : 18 : msecs = secs * 1000 + usecs / 1000;
1487 : 18 : usecs = usecs % 1000;
1488 : :
1489 : : /*
1490 : : * We loop over the lock's procLocks to gather a list of all
1491 : : * holders and waiters, so that we can provide more detailed
1492 : : * information for lock-debugging purposes.
1493 : : *
1494 : : * lock->procLocks contains all processes which hold or wait for
1495 : : * this lock.
1496 : : */
1497 : :
3685 fujii@postgresql.org 1498 : 18 : LWLockAcquire(partitionLock, LW_SHARED);
1499 : :
452 andres@anarazel.de 1500 [ + - + + ]: 61 : dlist_foreach(proc_iter, &lock->procLocks)
1501 : : {
1502 : 43 : curproclock =
1503 : 43 : dlist_container(PROCLOCK, lockLink, proc_iter.cur);
1504 : :
1505 : : /*
1506 : : * The proc is a waiter if its waitProcLock equals curproclock;
1507 : : * it is a holder if that is NULL or a different proclock.
1508 : : */
557 drowley@postgresql.o 1509 [ + + ]: 43 : if (curproclock->tag.myProc->waitProcLock == curproclock)
1510 : : {
3685 fujii@postgresql.org 1511 [ + + ]: 20 : if (first_waiter)
1512 : : {
1513 : 10 : appendStringInfo(&lock_waiters_sbuf, "%d",
557 drowley@postgresql.o 1514 : 10 : curproclock->tag.myProc->pid);
3685 fujii@postgresql.org 1515 : 10 : first_waiter = false;
1516 : : }
1517 : : else
1518 : 10 : appendStringInfo(&lock_waiters_sbuf, ", %d",
557 drowley@postgresql.o 1519 : 10 : curproclock->tag.myProc->pid);
1520 : : }
1521 : : else
1522 : : {
3685 fujii@postgresql.org 1523 [ + + ]: 23 : if (first_holder)
1524 : : {
1525 : 18 : appendStringInfo(&lock_holders_sbuf, "%d",
557 drowley@postgresql.o 1526 : 18 : curproclock->tag.myProc->pid);
3685 fujii@postgresql.org 1527 : 18 : first_holder = false;
1528 : : }
1529 : : else
1530 : 5 : appendStringInfo(&lock_holders_sbuf, ", %d",
557 drowley@postgresql.o 1531 : 5 : curproclock->tag.myProc->pid);
1532 : :
3685 fujii@postgresql.org 1533 : 23 : lockHoldersNum++;
1534 : : }
1535 : : }
1536 : :
1537 : 18 : LWLockRelease(partitionLock);
1538 : :
6074 tgl@sss.pgh.pa.us 1539 [ + + ]: 18 : if (deadlock_state == DS_SOFT_DEADLOCK)
1540 [ + - ]: 3 : ereport(LOG,
1541 : : (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1542 : : MyProcPid, modename, buf.data, msecs, usecs),
1543 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1544 : : "Processes holding the lock: %s. Wait queue: %s.",
1545 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1546 [ + + ]: 15 : else if (deadlock_state == DS_HARD_DEADLOCK)
1547 : : {
1548 : : /*
1549 : : * This message is a bit redundant with the error that will be
1550 : : * reported subsequently, but in some cases the error report
1551 : : * might not make it to the log (eg, if it's caught by an
1552 : : * exception handler), and we want to ensure all long-wait
1553 : : * events get logged.
1554 : : */
1555 [ + - ]: 1 : ereport(LOG,
1556 : : (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1557 : : MyProcPid, modename, buf.data, msecs, usecs),
1558 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1559 : : "Processes holding the lock: %s. Wait queue: %s.",
1560 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1561 : : }
1562 : :
1397 peter@eisentraut.org 1563 [ + + ]: 18 : if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
6074 tgl@sss.pgh.pa.us 1564 [ + - ]: 9 : ereport(LOG,
1565 : : (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1566 : : MyProcPid, modename, buf.data, msecs, usecs),
1567 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1568 : : "Processes holding the lock: %s. Wait queue: %s.",
1569 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1397 peter@eisentraut.org 1570 [ + + ]: 9 : else if (myWaitStatus == PROC_WAIT_STATUS_OK)
6074 tgl@sss.pgh.pa.us 1571 [ + - ]: 8 : ereport(LOG,
1572 : : (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1573 : : MyProcPid, modename, buf.data, msecs, usecs)));
1574 : : else
1575 : : {
1397 peter@eisentraut.org 1576 [ - + ]: 1 : Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1577 : :
1578 : : /*
1579 : : * Currently, the deadlock checker always kicks its own
1580 : : * process, which means that we'll only see
1581 : : * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1582 : : * DS_HARD_DEADLOCK, and there's no need to print redundant
1583 : : * messages. But for completeness and future-proofing, print
1584 : : * a message if it looks like someone else kicked us off the
1585 : : * lock.
1586 : : */
6074 tgl@sss.pgh.pa.us 1587 [ - + ]: 1 : if (deadlock_state != DS_HARD_DEADLOCK)
6074 tgl@sss.pgh.pa.us 1588 [ # # ]:UBC 0 : ereport(LOG,
1589 : : (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1590 : : MyProcPid, modename, buf.data, msecs, usecs),
1591 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1592 : : "Processes holding the lock: %s. Wait queue: %s.",
1593 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1594 : : }
1595 : :
1596 : : /*
1597 : : * At this point we might still need to wait for the lock. Reset
1598 : : * state so we don't print the above messages again.
1599 : : */
6074 tgl@sss.pgh.pa.us 1600 :CBC 18 : deadlock_state = DS_NO_DEADLOCK;
1601 : :
1602 : 18 : pfree(buf.data);
3685 fujii@postgresql.org 1603 : 18 : pfree(lock_holders_sbuf.data);
1604 : 18 : pfree(lock_waiters_sbuf.data);
1605 : : }
1397 peter@eisentraut.org 1606 [ + + ]: 3329 : } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1607 : :
1608 : : /*
1609 : : * Disable the timers if they are still running. As in LockErrorCleanup,
1610 : : * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1611 : : * already caused QueryCancelPending to become set, we want the cancel to
1612 : : * be reported as a lock timeout, not a user cancel.
1613 : : */
2957 simon@2ndQuadrant.co 1614 [ + + ]: 1064 : if (!InHotStandby)
1615 : : {
1616 [ + + ]: 1063 : if (LockTimeout > 0)
1617 : : {
1618 : : DisableTimeoutParams timeouts[2];
1619 : :
1620 : 109 : timeouts[0].id = DEADLOCK_TIMEOUT;
1621 : 109 : timeouts[0].keep_indicator = false;
1622 : 109 : timeouts[1].id = LOCK_TIMEOUT;
1623 : 109 : timeouts[1].keep_indicator = true;
1624 : 109 : disable_timeouts(timeouts, 2);
1625 : : }
1626 : : else
1627 : 954 : disable_timeout(DEADLOCK_TIMEOUT, false);
1628 : : }
1629 : :
1630 : : /*
1631 : : * Emit the log message if recovery conflict on lock was resolved but the
1632 : : * startup process waited longer than deadlock_timeout for it.
1633 : : */
1187 fujii@postgresql.org 1634 [ + + + - ]: 1064 : if (InHotStandby && logged_recovery_conflict)
1187 fujii@postgresql.org 1635 :GBC 1 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1636 : : standbyWaitStart, GetCurrentTimestamp(),
1637 : : NULL, false);
1638 : :
1639 : : /*
1640 : : * Re-acquire the lock table's partition lock. We have to do this to hold
1641 : : * off cancel/die interrupts before we can mess with lockAwaited (else we
1642 : : * might have a missed or duplicated locallock update).
1643 : : */
6699 tgl@sss.pgh.pa.us 1644 :CBC 1064 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1645 : :
1646 : : /*
1647 : : * We no longer want LockErrorCleanup to do anything.
1648 : : */
1649 : 1064 : lockAwaited = NULL;
1650 : :
1651 : : /*
1652 : : * If we got the lock, be sure to remember it in the locallock table.
1653 : : */
1397 peter@eisentraut.org 1654 [ + + ]: 1064 : if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
7170 tgl@sss.pgh.pa.us 1655 : 1060 : GrantAwaitedLock();
1656 : :
1657 : : /*
1658 : : * We don't have to do anything else, because the awaker did all the
1659 : : * necessary update of the lock table and MyProc.
1660 : : */
7211 1661 : 1064 : return MyProc->waitStatus;
1662 : : }
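
The wait loop above is an instance of PostgreSQL's generic latch protocol: sleep on the latch, reset it, service interrupts, then recheck the shared condition, because latches are set from many sources besides a lock release. A minimal standalone sketch of that pattern as it might appear in extension code; state_is_ready is a hypothetical stand-in for the real shared-memory condition, and the wait-event constant and header placement vary a little across major versions:

    #include "postgres.h"
    #include "miscadmin.h"
    #include "storage/latch.h"
    #include "utils/wait_event.h"

    static void
    wait_for_condition(volatile bool *state_is_ready)
    {
        /* Recheck after every wakeup; a set latch can be unrelated. */
        while (!*state_is_ready)
        {
            (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
                             PG_WAIT_EXTENSION);
            ResetLatch(MyLatch);    /* clear it before rechecking the state */
            CHECK_FOR_INTERRUPTS(); /* react promptly to cancel/die */
        }
    }
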
1663 : :
1664 : :
1665 : : /*
1666 : : * ProcWakeup -- wake up a process by setting its latch.
1667 : : *
1668 : : * Also remove the process from the wait queue and set its links invalid.
1669 : : *
1670 : : * The appropriate lock partition lock must be held by caller.
1671 : : *
1672 : : * XXX: presently, this code is only used for the "success" case, and only
1673 : : * works correctly for that case. To clean up in the failure case, we would
1674 : : * need to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1675 : : * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1676 : : */
1677 : : void
1397 peter@eisentraut.org 1678 : 1062 : ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
1679 : : {
452 andres@anarazel.de 1680 [ - + ]: 1062 : if (dlist_node_is_detached(&proc->links))
452 andres@anarazel.de 1681 :UBC 0 : return;
1682 : :
1397 peter@eisentraut.org 1683 [ - + ]:CBC 1062 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1684 : :
1685 : : /* Remove process from wait queue */
452 andres@anarazel.de 1686 : 1062 : dclist_delete_from_thoroughly(&proc->waitLock->waitProcs, &proc->links);
1687 : :
1688 : : /* Clean up process' state and pass it the ok/fail signal */
8483 tgl@sss.pgh.pa.us 1689 : 1062 : proc->waitLock = NULL;
7170 1690 : 1062 : proc->waitProcLock = NULL;
7211 1691 : 1062 : proc->waitStatus = waitStatus;
1154 fujii@postgresql.org 1692 : 1062 : pg_atomic_write_u64(&MyProc->waitStart, 0);
1693 : :
1694 : : /* And awaken it */
3358 andres@anarazel.de 1695 : 1062 : SetLatch(&proc->procLatch);
1696 : : }
1697 : :
1698 : : /*
1699 : : * ProcLockWakeup -- routine for waking up processes when a lock is
1700 : : * released (or a prior waiter is aborted). Scan all waiters
1701 : : * for the lock and waken any that are no longer blocked.
1702 : : *
1703 : : * The appropriate lock partition lock must be held by caller.
1704 : : */
1705 : : void
7440 bruce@momjian.us 1706 : 1058 : ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1707 : : {
452 andres@anarazel.de 1708 : 1058 : dclist_head *waitQueue = &lock->waitProcs;
7440 bruce@momjian.us 1709 : 1058 : LOCKMASK aheadRequests = 0;
1710 : : dlist_mutable_iter miter;
1711 : :
452 andres@anarazel.de 1712 [ + + ]: 1058 : if (dclist_is_empty(waitQueue))
8480 tgl@sss.pgh.pa.us 1713 : 41 : return;
1714 : :
452 andres@anarazel.de 1715 [ + - + + ]: 2205 : dclist_foreach_modify(miter, waitQueue)
1716 : : {
1717 : 1188 : PGPROC *proc = dlist_container(PGPROC, links, miter.cur);
8424 bruce@momjian.us 1718 : 1188 : LOCKMODE lockmode = proc->waitLockMode;
1719 : :
1720 : : /*
1721 : : * Waken if (a) doesn't conflict with requests of earlier waiters, and
1722 : : * (b) doesn't conflict with already-held locks.
1723 : : */
7941 1724 [ + + ]: 1188 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1568 peter@eisentraut.org 1725 [ + + ]: 1135 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1726 : : proc->waitProcLock))
1727 : : {
1728 : : /* OK to waken */
7170 tgl@sss.pgh.pa.us 1729 : 1062 : GrantLock(lock, proc->waitProcLock, lockmode);
1730 : : /* removes proc from the lock's waiting process queue */
452 andres@anarazel.de 1731 : 1062 : ProcWakeup(proc, PROC_WAIT_STATUS_OK);
1732 : : }
1733 : : else
1734 : : {
1735 : : /*
1736 : : * Lock conflicts: Don't wake, but remember requested mode for
1737 : : * later checks.
1738 : : */
7440 bruce@momjian.us 1739 : 126 : aheadRequests |= LOCKBIT_ON(lockmode);
1740 : : }
1741 : : }
1742 : : }
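
The wakeup policy above is plain bitmask arithmetic: LOCKBIT_ON(m) is 1 << m, and lockMethodTable->conflictTab[m] is the mask of modes that conflict with m, so any skipped waiter blocks every later waiter whose mode conflicts with its own. A self-contained illustration; the mode numbers match src/include/storage/lock.h, but the two-entry conflict table is a deliberate simplification:

    #include <stdio.h>

    #define RowExclusiveLock 3          /* as in storage/lock.h */
    #define ShareLock        5
    #define LOCKBIT_ON(m)    (1 << (m))

    int
    main(void)
    {
        int conflictTab[16] = {0};
        int aheadRequests = 0;

        /* ShareLock and RowExclusiveLock conflict with each other. */
        conflictTab[ShareLock] |= LOCKBIT_ON(RowExclusiveLock);
        conflictTab[RowExclusiveLock] |= LOCKBIT_ON(ShareLock);

        /* Suppose the first waiter wants ShareLock and cannot be granted. */
        aheadRequests |= LOCKBIT_ON(ShareLock);

        /* A later RowExclusiveLock waiter must stay queued behind it... */
        printf("RowExclusive blocked: %s\n",
               (conflictTab[RowExclusiveLock] & aheadRequests) ? "yes" : "no");

        /* ...while another ShareLock waiter remains a wakeup candidate. */
        printf("ShareLock blocked:    %s\n",
               (conflictTab[ShareLock] & aheadRequests) ? "yes" : "no");
        return 0;
    }
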
1743 : :
1744 : : /*
1745 : : * CheckDeadLock
1746 : : *
1747 : : * We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
1748 : : * lock to be released by some other process. Check if there's a deadlock; if
1749 : : * not, just return. (But signal ProcSleep to log a message, if
1750 : : * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1751 : : * the lock's wait queue and signal an error to ProcSleep.
1752 : : */
1753 : : static void
7946 1754 : 34 : CheckDeadLock(void)
1755 : : {
1756 : : int i;
1757 : :
1758 : : /*
1759 : : * Acquire exclusive lock on the entire shared lock data structures. Must
1760 : : * grab LWLocks in partition-number order to avoid LWLock deadlock.
1761 : : *
1762 : : * Note that the deadlock check interrupt had better not be enabled
1763 : : * anywhere that this process itself holds lock partition locks, else this
1764 : : * will wait forever. Also note that LWLockAcquire creates a critical
1765 : : * section, so that this routine cannot be interrupted by cancel/die
1766 : : * interrupts.
1767 : : */
6699 tgl@sss.pgh.pa.us 1768 [ + + ]: 578 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3730 rhaas@postgresql.org 1769 : 544 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1770 : :
1771 : : /*
1772 : : * Check to see if we've been awoken by anyone in the interim.
1773 : : *
1774 : : * If we have, we can return and resume our transaction -- happy day.
1775 : : * Before we are awoken, the process releasing the lock grants it to us,
1776 : : * so we know that we don't have to wait anymore.
1777 : : *
1778 : : * We check by looking to see if we've been unlinked from the wait queue.
1779 : : * This is safe because we hold the lock partition lock.
1780 : : */
5642 tgl@sss.pgh.pa.us 1781 [ + + ]: 34 : if (MyProc->links.prev == NULL ||
1782 [ - + ]: 33 : MyProc->links.next == NULL)
6144 1783 : 1 : goto check_done;
1784 : :
1785 : : #ifdef LOCK_DEBUG
1786 : : if (Debug_deadlocks)
1787 : : DumpAllLocks();
1788 : : #endif
1789 : :
1790 : : /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1791 : 33 : deadlock_state = DeadLockCheck(MyProc);
1792 : :
6252 bruce@momjian.us 1793 [ + + ]: 33 : if (deadlock_state == DS_HARD_DEADLOCK)
1794 : : {
1795 : : /*
1796 : : * Oops. We have a deadlock.
1797 : : *
1798 : : * Get this process out of wait state. (Note: we could do this more
1799 : : * efficiently by relying on lockAwaited, but use this coding to
1800 : : * preserve the flexibility to kill some other transaction than the
1801 : : * one detecting the deadlock.)
1802 : : *
1803 : : * RemoveFromWaitQueue sets MyProc->waitStatus to
1804 : : * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1805 : : * return from the signal handler.
1806 : : */
1807 [ - + ]: 4 : Assert(MyProc->waitLock != NULL);
1808 : 4 : RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1809 : :
1810 : : /*
1811 : : * We're done here. The transaction abort triggered by the error that
1812 : : * ProcSleep will raise will release any other locks we hold,
1813 : : * thus allowing other processes to wake up; we don't need
1814 : : * to do that here. NOTE: an exception is that releasing locks we
1815 : : * hold doesn't consider the possibility of waiters that were blocked
1816 : : * behind us on the lock we just failed to get, and might now be
1817 : : * wakable because we're not in front of them anymore. However,
1818 : : * RemoveFromWaitQueue took care of waking up any such processes.
1819 : : */
1820 : : }
1821 : :
1822 : : /*
1823 : : * And release locks. We do this in reverse order for two reasons: (1)
1824 : : * Anyone else who needs more than one of the locks will be trying to lock
1825 : : * them in increasing order; we don't want to release the other process
1826 : : * until it can get all the locks it needs. (2) This avoids O(N^2)
1827 : : * behavior inside LWLockRelease.
1828 : : */
6144 tgl@sss.pgh.pa.us 1829 : 29 : check_done:
6402 bruce@momjian.us 1830 [ + + ]: 578 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3730 rhaas@postgresql.org 1831 : 544 : LWLockRelease(LockHashPartitionLockByIndex(i));
10141 scrappy@hub.org 1832 : 34 : }
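
Taking the partition locks in ascending index order, and releasing them in descending order, is the classic total-ordering discipline for holding many locks at once without risking lock-order deadlock. A hedged standalone analogue using POSIX mutexes (NUM_PARTITIONS and the helper names are illustrative):

    #include <pthread.h>

    #define NUM_PARTITIONS 16

    static pthread_mutex_t partition_lock[NUM_PARTITIONS];

    static void
    init_partitions(void)
    {
        for (int i = 0; i < NUM_PARTITIONS; i++)
            pthread_mutex_init(&partition_lock[i], NULL);
    }

    static void
    lock_all_partitions(void)
    {
        /* Every locker uses ascending index order, so no cycle can form. */
        for (int i = 0; i < NUM_PARTITIONS; i++)
            pthread_mutex_lock(&partition_lock[i]);
    }

    static void
    unlock_all_partitions(void)
    {
        /*
         * Release in reverse: the low-numbered locks, which anyone following
         * the same ordering must acquire first, are freed last, so a blocked
         * locker wakes only once it can proceed all the way through.
         */
        for (int i = NUM_PARTITIONS; --i >= 0;)
            pthread_mutex_unlock(&partition_lock[i]);
    }
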
1833 : :
1834 : : /*
1835 : : * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1836 : : *
1837 : : * NB: Runs inside a signal handler, be careful.
1838 : : */
1839 : : void
3358 andres@anarazel.de 1840 : 34 : CheckDeadLockAlert(void)
1841 : : {
1842 : 34 : int save_errno = errno;
1843 : :
1844 : 34 : got_deadlock_timeout = true;
1845 : :
1846 : : /*
1847 : : * We have to set the latch again, even if handle_sig_alarm already did:
1848 : : * at that point got_deadlock_timeout was not yet set. It's unlikely that
1849 : : * this would ever be a problem, but setting an already-set latch is cheap.
1850 : : *
1851 : : * Note that, when this function runs inside procsignal_sigusr1_handler(),
1852 : : * the handler function sets the latch again after the latch is set here.
1853 : : */
1854 : 34 : SetLatch(MyLatch);
1855 : 34 : errno = save_errno;
1856 : 34 : }
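
CheckDeadLockAlert confines itself to operations that are safe in a signal handler: a sig_atomic_t store, saving and restoring errno, and SetLatch, which is written to be async-signal-safe. A standalone POSIX sketch of the same discipline, with pause() standing in for the latch wait:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static volatile sig_atomic_t got_alarm = 0;

    static void
    alarm_handler(int signo)
    {
        int save_errno = errno;   /* a handler must not clobber errno */

        (void) signo;
        got_alarm = 1;            /* sig_atomic_t store: async-signal-safe */
        errno = save_errno;
    }

    int
    main(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = alarm_handler;
        sigaction(SIGALRM, &sa, NULL);

        alarm(1);                 /* fire SIGALRM in one second */
        while (!got_alarm)
            pause();              /* the wakeup plays the role of the set latch */
        printf("timeout noticed in the main loop\n");
        return 0;
    }
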
1857 : :
1858 : : /*
1859 : : * ProcWaitForSignal - wait for a signal from another backend.
1860 : : *
1861 : : * As this uses the generic process latch, the caller has to be robust against
1862 : : * unrelated wakeups: always check that the desired state has occurred, and
1863 : : * wait again if not (a caller-side sketch follows this function).
1864 : : */
1865 : : void
2749 rhaas@postgresql.org 1866 : 17 : ProcWaitForSignal(uint32 wait_event_info)
1867 : : {
1969 tmunro@postgresql.or 1868 : 17 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1869 : : wait_event_info);
3358 andres@anarazel.de 1870 : 17 : ResetLatch(MyLatch);
1871 [ - + ]: 17 : CHECK_FOR_INTERRUPTS();
8318 tgl@sss.pgh.pa.us 1872 : 17 : }
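
As the header comment warns, a return from ProcWaitForSignal is only a hint, not a guarantee. A minimal sketch of the caller-side recheck loop for backend or extension code; MySharedState and work_done are hypothetical illustration names, the peer is assumed to set the flag before calling ProcSendSignal, and the wait-event constant is version-dependent:

    typedef struct MySharedState
    {
        volatile bool work_done;    /* set by the peer before ProcSendSignal() */
    } MySharedState;

    static void
    wait_for_peer(MySharedState *state)
    {
        /* Robust against unrelated wakeups: recheck the state, wait again. */
        while (!state->work_done)
            ProcWaitForSignal(PG_WAIT_EXTENSION);
    }
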
1873 : :
1874 : : /*
1875 : : * ProcSendSignal - set the latch of a backend identified by ProcNumber
1876 : : */
1877 : : void
42 heikki.linnakangas@i 1878 :GNC 3 : ProcSendSignal(ProcNumber procNumber)
1879 : : {
1880 [ + - - + ]: 3 : if (procNumber < 0 || procNumber >= ProcGlobal->allProcCount)
42 heikki.linnakangas@i 1881 [ # # ]:UNC 0 : elog(ERROR, "procNumber out of range");
1882 : :
42 heikki.linnakangas@i 1883 :GNC 3 : SetLatch(&ProcGlobal->allProcs[procNumber].procLatch);
8318 tgl@sss.pgh.pa.us 1884 :CBC 3 : }
1885 : :
1886 : : /*
1887 : : * BecomeLockGroupLeader - designate process as lock group leader
1888 : : *
1889 : : * Once this function has returned, other processes can join the lock group
1890 : : * by calling BecomeLockGroupMember.
1891 : : */
1892 : : void
2989 rhaas@postgresql.org 1893 : 544 : BecomeLockGroupLeader(void)
1894 : : {
1895 : : LWLock *leader_lwlock;
1896 : :
1897 : : /* If we already did it, we don't need to do it again. */
1898 [ + + ]: 544 : if (MyProc->lockGroupLeader == MyProc)
1899 : 485 : return;
1900 : :
1901 : : /* We had better not be a follower. */
1902 [ - + ]: 59 : Assert(MyProc->lockGroupLeader == NULL);
1903 : :
1904 : : /* Create single-member group, containing only ourselves. */
1905 : 59 : leader_lwlock = LockHashPartitionLockByProc(MyProc);
1906 : 59 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1907 : 59 : MyProc->lockGroupLeader = MyProc;
1908 : 59 : dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
1909 : 59 : LWLockRelease(leader_lwlock);
1910 : : }
1911 : :
1912 : : /*
1913 : : * BecomeLockGroupMember - designate process as lock group member
1914 : : *
1915 : : * This is pretty straightforward except for the possibility that the leader
1916 : : * whose group we're trying to join might exit before we manage to do so,
1917 : : * and its PGPROC might get recycled for an unrelated process. To avoid
1918 : : * that, we require the caller to pass the PID of the intended PGPROC as
1919 : : * an interlock. Returns true if we successfully join the intended lock
1920 : : * group, and false if not.
1921 : : */
1922 : : bool
1923 : 1322 : BecomeLockGroupMember(PGPROC *leader, int pid)
1924 : : {
1925 : : LWLock *leader_lwlock;
1926 : 1322 : bool ok = false;
1927 : :
1928 : : /* Group leader can't become member of group */
1929 [ - + ]: 1322 : Assert(MyProc != leader);
1930 : :
1931 : : /* Can't already be a member of a group */
2974 tgl@sss.pgh.pa.us 1932 [ - + ]: 1322 : Assert(MyProc->lockGroupLeader == NULL);
1933 : :
1934 : : /* PID must be valid. */
2989 rhaas@postgresql.org 1935 [ - + ]: 1322 : Assert(pid != 0);
1936 : :
1937 : : /*
1938 : : * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1939 : : * calculates the proc number based on the PGPROC slot without looking at
1940 : : * its contents, so we will acquire the correct lock even if the leader
1941 : : * PGPROC is in the process of being recycled.
1942 : : */
2975 1943 : 1322 : leader_lwlock = LockHashPartitionLockByProc(leader);
2989 1944 : 1322 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1945 : :
1946 : : /* Is this the leader we're looking for? */
2974 tgl@sss.pgh.pa.us 1947 [ + - + - ]: 1322 : if (leader->pid == pid && leader->lockGroupLeader == leader)
1948 : : {
1949 : : /* OK, join the group */
2989 rhaas@postgresql.org 1950 : 1322 : ok = true;
1951 : 1322 : MyProc->lockGroupLeader = leader;
1952 : 1322 : dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1953 : : }
1954 : 1322 : LWLockRelease(leader_lwlock);
1955 : :
1956 : 1322 : return ok;
1957 : : }
|