Age Owner TLA Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * proc.c
4 : * routines to manage per-process shared memory data structure
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/storage/lmgr/proc.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /*
16 : * Interface (a):
17 : * ProcSleep(), ProcWakeup()
18 : *
19 : * Waiting for a lock causes the backend to be put to sleep. Whoever releases
20 : * the lock wakes the process up again (and gives it an error code so it knows
21 : * whether it was awoken on an error condition).
22 : *
23 : * Interface (b):
24 : *
25 : * ProcReleaseLocks -- frees the locks associated with current transaction
26 : *
27 : * ProcKill -- destroys the shared memory state (and locks)
28 : * associated with the process.
29 : */
30 : #include "postgres.h"
31 :
32 : #include <signal.h>
33 : #include <unistd.h>
34 : #include <sys/time.h>
35 :
36 : #include "access/transam.h"
37 : #include "access/twophase.h"
38 : #include "access/xlogutils.h"
39 : #include "miscadmin.h"
40 : #include "pgstat.h"
41 : #include "postmaster/autovacuum.h"
42 : #include "replication/slot.h"
43 : #include "replication/syncrep.h"
44 : #include "replication/walsender.h"
45 : #include "storage/condition_variable.h"
46 : #include "storage/ipc.h"
47 : #include "storage/lmgr.h"
48 : #include "storage/pmsignal.h"
49 : #include "storage/proc.h"
50 : #include "storage/procarray.h"
51 : #include "storage/procsignal.h"
52 : #include "storage/spin.h"
53 : #include "storage/standby.h"
54 : #include "utils/timeout.h"
55 : #include "utils/timestamp.h"
56 :
57 : /* GUC variables */
58 : int DeadlockTimeout = 1000;
59 : int StatementTimeout = 0;
60 : int LockTimeout = 0;
61 : int IdleInTransactionSessionTimeout = 0;
62 : int IdleSessionTimeout = 0;
63 : bool log_lock_waits = false;
64 :
65 : /* Pointer to this process's PGPROC struct, if any */
66 : PGPROC *MyProc = NULL;
67 :
68 : /*
69 : * This spinlock protects the freelist of recycled PGPROC structures.
70 : * We cannot use an LWLock because the LWLock manager depends on already
71 : * having a PGPROC and a wait semaphore! But these structures are touched
72 : * relatively infrequently (only at backend startup or shutdown) and not for
73 : * very long, so a spinlock is okay.
74 : */
75 : NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
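The pattern the comment above describes — take ProcStructLock, touch a freelist briefly, release — is what InitProcess() and ProcKill() below do. A minimal sketch, assuming a hypothetical helper name; the dlist calls are the same ones the real code uses:

/* Hypothetical illustration only: popping a PGPROC off a freelist. */
static PGPROC *
example_pop_free_pgproc(dlist_head *freelist)
{
	PGPROC	   *proc = NULL;

	SpinLockAcquire(ProcStructLock);	/* no LWLock exists for us yet */
	if (!dlist_is_empty(freelist))
		proc = (PGPROC *) dlist_pop_head_node(freelist);
	SpinLockRelease(ProcStructLock);	/* keep the critical section short */

	return proc;
}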
76 :
77 : /* Pointers to shared-memory structures */
78 : PROC_HDR *ProcGlobal = NULL;
79 : NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
80 : PGPROC *PreparedXactProcs = NULL;
81 :
82 : /* If we are waiting for a lock, this points to the associated LOCALLOCK */
83 : static LOCALLOCK *lockAwaited = NULL;
84 :
85 : static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
86 :
87 : /* Is a deadlock check pending? */
88 : static volatile sig_atomic_t got_deadlock_timeout;
89 :
90 : static void RemoveProcFromArray(int code, Datum arg);
91 : static void ProcKill(int code, Datum arg);
92 : static void AuxiliaryProcKill(int code, Datum arg);
93 : static void CheckDeadLock(void);
94 :
95 :
96 : /*
97 : * Report shared-memory space needed by InitProcGlobal.
6766 tgl 98 ECB : */
99 : Size
6505 tgl 100 CBC 2738 : ProcGlobalShmemSize(void)
101 : {
6441 102 2738 : Size size = 0;
103 : Size TotalProcs =
362 rhaas 104 GIC 2738 : add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
6441 tgl 105 ECB :
106 : /* ProcGlobal */
6441 tgl 107 CBC 2738 : size = add_size(size, sizeof(PROC_HDR));
968 andres 108 GIC 2738 : size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
6441 tgl 109 CBC 2738 : size = add_size(size, sizeof(slock_t));
6766 tgl 110 ECB :
968 andres 111 CBC 2738 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
968 andres 112 GIC 2738 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
874 alvherre 113 CBC 2738 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
114 :
6766 tgl 115 GIC 2738 : return size;
116 : }
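Informally, and setting aside the overflow checking that add_size()/mul_size() provide, the computation above amounts to the following (a sketch, not code from the file):

/*
 *   TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts
 *
 *   size = sizeof(PROC_HDR) + sizeof(slock_t)
 *        + TotalProcs * (sizeof(PGPROC)
 *                        + sizeof(TransactionId)      -- xids
 *                        + sizeof(XidCacheStatus)     -- subxidStates
 *                        + sizeof(uint8))             -- statusFlags
 */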
117 :
118 : /*
119 : * Report number of semaphores needed by InitProcGlobal.
7644 tgl 120 ECB : */
121 : int
6505 tgl 122 GIC 2738 : ProcGlobalSemas(void)
123 : {
124 : /*
125 : * We need a sema per backend (including autovacuum), plus one for each
5837 alvherre 126 ECB : * auxiliary process.
127 : */
362 rhaas 128 GIC 2738 : return MaxBackends + NUM_AUXILIARY_PROCS;
129 : }
130 :
131 : /*
132 : * InitProcGlobal -
133 : * Initialize the global process table during postmaster or standalone
134 : * backend startup.
135 : *
136 : * We also create all the per-process semaphores we will need to support
137 : * the requested number of backends. We used to allocate semaphores
138 : * only when backends were actually started up, but that is bad because
139 : * it lets Postgres fail under load --- a lot of Unix systems are
140 : * (mis)configured with small limits on the number of semaphores, and
141 : * running out when trying to start another backend is a common failure.
142 : * So, now we grab enough semaphores to support the desired max number
143 : * of backends immediately at initialization --- if the sysadmin has set
144 : * MaxConnections, max_worker_processes, max_wal_senders, or
145 : * autovacuum_max_workers higher than his kernel will support, he'll
146 : * find out sooner rather than later.
147 : *
148 : * Another reason for creating semaphores here is that the semaphore
149 : * implementation typically requires us to create semaphores in the
150 : * postmaster, not in backends.
151 : *
152 : * Note: this is NOT called by individual backends under a postmaster,
153 : * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
154 : * pointers must be propagated specially for EXEC_BACKEND operation.
9770 scrappy 155 ECB : */
156 : void
6505 tgl 157 GIC 1826 : InitProcGlobal(void)
158 : {
159 : PGPROC *procs;
160 : int i,
4177 rhaas 161 ECB : j;
162 : bool found;
362 rhaas 163 GIC 1826 : uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
9770 scrappy 164 ECB :
6304 tgl 165 : /* Create the ProcGlobal shared structure */
9345 bruce 166 CBC 1826 : ProcGlobal = (PROC_HDR *)
6304 tgl 167 GIC 1826 : ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
168 1826 : Assert(!found);
169 :
170 : /*
6304 tgl 171 ECB : * Initialize the data structures.
172 : */
4319 rhaas 173 CBC 1826 : ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
81 andres 174 GNC 1826 : dlist_init(&ProcGlobal->freeProcs);
175 1826 : dlist_init(&ProcGlobal->autovacFreeProcs);
176 1826 : dlist_init(&ProcGlobal->bgworkerFreeProcs);
177 1826 : dlist_init(&ProcGlobal->walsenderFreeProcs);
4268 tgl 178 CBC 1826 : ProcGlobal->startupBufferPinWaitBufId = -1;
3988 179 1826 : ProcGlobal->walwriterLatch = NULL;
180 1826 : ProcGlobal->checkpointerLatch = NULL;
2614 rhaas 181 GIC 1826 : pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PGPROCNO);
2046 182 1826 : pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PGPROCNO);
183 :
184 : /*
185 : * Create and initialize all the PGPROC structures we'll need. There are
186 : * five separate consumers: (1) normal backends, (2) autovacuum workers
187 : * and the autovacuum launcher, (3) background workers, (4) auxiliary
188 : * processes, and (5) prepared transactions. Each PGPROC structure is
189 : * dedicated to exactly one of these purposes, and they do not move
3602 bruce 190 ECB : * between groups.
4969 tgl 191 : */
4319 rhaas 192 CBC 1826 : procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
2411 tgl 193 GIC 1826 : MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
4334 rhaas 194 CBC 1826 : ProcGlobal->allProcs = procs;
195 : /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
362 rhaas 196 GIC 1826 : ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
197 :
198 : /*
199 : * Allocate arrays mirroring PGPROC fields in a dense manner. See
200 : * PROC_HDR.
201 : *
202 : * XXX: It might make sense to increase padding for these arrays, given
968 andres 203 ECB : * how hotly they are accessed.
204 : */
968 andres 205 CBC 3652 : ProcGlobal->xids =
206 1826 : (TransactionId *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->xids));
207 4219 : MemSet(ProcGlobal->xids, 0, TotalProcs * sizeof(*ProcGlobal->xids));
208 1826 : ProcGlobal->subxidStates = (XidCacheStatus *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->subxidStates));
209 1898 : MemSet(ProcGlobal->subxidStates, 0, TotalProcs * sizeof(*ProcGlobal->subxidStates));
874 alvherre 210 GIC 1826 : ProcGlobal->statusFlags = (uint8 *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->statusFlags));
874 alvherre 211 CBC 1862 : MemSet(ProcGlobal->statusFlags, 0, TotalProcs * sizeof(*ProcGlobal->statusFlags));
212 :
4319 rhaas 213 203098 : for (i = 0; i < TotalProcs; i++)
214 : {
81 andres 215 GNC 201272 : PGPROC *proc = &procs[i];
216 :
217 : /* Common initialization for all PGPROCs, regardless of type. */
218 :
219 : /*
220 : * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
221 : * dummy PGPROCs don't need these though - they're never associated
222 : * with a real process
223 : */
362 rhaas 224 CBC 201272 : if (i < MaxBackends + NUM_AUXILIARY_PROCS)
225 : {
81 andres 226 GNC 200507 : proc->sem = PGSemaphoreCreate();
227 200507 : InitSharedLatch(&(proc->procLatch));
228 200507 : LWLockInitialize(&(proc->fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
229 : }
230 201272 : proc->pgprocno = i;
231 :
232 : /*
233 : * Newly created PGPROCs for normal backends, autovacuum and bgworkers
234 : * must be queued up on the appropriate free list. Because there can
235 : * only ever be a small, fixed number of auxiliary processes, no free
236 : * list is used in that case; InitAuxiliaryProcess() instead uses a
237 : * linear search. PGPROCs for prepared transactions are added to a
238 : * free list by TwoPhaseShmemInit().
239 : */
4319 rhaas 240 CBC 201272 : if (i < MaxConnections)
241 : {
242 : /* PGPROC for normal backend, add to freeProcs list */
81 andres 243 GNC 153200 : dlist_push_head(&ProcGlobal->freeProcs, &proc->links);
244 153200 : proc->procgloballist = &ProcGlobal->freeProcs;
4319 rhaas 245 ECB : }
3776 alvherre 246 GIC 48072 : else if (i < MaxConnections + autovacuum_max_workers + 1)
247 : {
4319 rhaas 248 ECB : /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
81 andres 249 GNC 7304 : dlist_push_head(&ProcGlobal->autovacFreeProcs, &proc->links);
250 7304 : proc->procgloballist = &ProcGlobal->autovacFreeProcs;
251 : }
1517 michael 252 GIC 40768 : else if (i < MaxConnections + autovacuum_max_workers + 1 + max_worker_processes)
3776 alvherre 253 ECB : {
254 : /* PGPROC for bgworker, add to bgworkerFreeProcs list */
81 andres 255 GNC 14608 : dlist_push_head(&ProcGlobal->bgworkerFreeProcs, &proc->links);
256 14608 : proc->procgloballist = &ProcGlobal->bgworkerFreeProcs;
257 : }
362 rhaas 258 CBC 26160 : else if (i < MaxBackends)
1517 michael 259 ECB : {
260 : /* PGPROC for walsender, add to walsenderFreeProcs list */
81 andres 261 GNC 16265 : dlist_push_head(&ProcGlobal->walsenderFreeProcs, &proc->links);
262 16265 : proc->procgloballist = &ProcGlobal->walsenderFreeProcs;
1517 michael 263 ECB : }
264 :
265 : /* Initialize myProcLocks[] shared memory queues. */
4177 rhaas 266 CBC 3421624 : for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
81 andres 267 GNC 3220352 : dlist_init(&(proc->myProcLocks[j]));
268 :
269 : /* Initialize lockGroupMembers list. */
270 201272 : dlist_init(&proc->lockGroupMembers);
271 :
1608 akapila 272 ECB : /*
273 : * Initialize the atomic variables; otherwise, it won't be safe to
274 : * access them for backends that aren't currently in use.
275 : */
81 andres 276 GNC 201272 : pg_atomic_init_u32(&(proc->procArrayGroupNext), INVALID_PGPROCNO);
277 201272 : pg_atomic_init_u32(&(proc->clogGroupNext), INVALID_PGPROCNO);
278 201272 : pg_atomic_init_u64(&(proc->waitStart), 0);
279 : }
280 :
4969 tgl 281 ECB : /*
3955 bruce 282 : * Save pointers to the blocks of PGPROC structures reserved for auxiliary
283 : * processes and prepared transactions.
284 : */
362 rhaas 285 CBC 1826 : AuxiliaryProcs = &procs[MaxBackends];
286 1826 : PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
6304 tgl 287 ECB :
288 : /* Create ProcStructLock spinlock, too */
6304 tgl 289 GIC 1826 : ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
290 1826 : SpinLockInit(ProcStructLock);
9770 scrappy 291 1826 : }
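A hedged summary of how the loop above carves the PGPROC array into its five consumers by slot index; the helper below simply mirrors the if/else chain and is illustrative only, not part of the file:

/* Illustration only: which list slot i is pushed onto during init. */
static dlist_head *
example_list_for_slot(int i)
{
	if (i < MaxConnections)
		return &ProcGlobal->freeProcs;			/* normal backends */
	if (i < MaxConnections + autovacuum_max_workers + 1)
		return &ProcGlobal->autovacFreeProcs;	/* AV launcher + workers */
	if (i < MaxConnections + autovacuum_max_workers + 1 + max_worker_processes)
		return &ProcGlobal->bgworkerFreeProcs;	/* background workers */
	if (i < MaxBackends)
		return &ProcGlobal->walsenderFreeProcs; /* walsenders */
	return NULL;		/* auxiliary procs and prepared xacts: no freelist */
}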
292 :
7862 tgl 293 ECB : /*
294 : * InitProcess -- initialize a per-process data structure for this backend
295 : */
296 : void
8167 tgl 297 GIC 11513 : InitProcess(void)
298 : {
299 : dlist_head *procgloballist;
300 :
7884 tgl 301 ECB : /*
6304 tgl 302 EUB : * ProcGlobal should be set up already (if we are a backend, we inherit
303 : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
7884 tgl 304 ECB : */
2732 rhaas 305 GBC 11513 : if (ProcGlobal == NULL)
7199 tgl 306 UIC 0 : elog(PANIC, "proc header uninitialized");
307 :
7884 tgl 308 CBC 11513 : if (MyProc != NULL)
7199 tgl 309 LBC 0 : elog(ERROR, "you already exist");
9345 bruce 310 ECB :
2812 rhaas 311 : /* Decide which list should supply our PGPROC. */
2812 rhaas 312 CBC 11513 : if (IsAnyAutoVacuumProcess())
2732 313 322 : procgloballist = &ProcGlobal->autovacFreeProcs;
2812 rhaas 314 GIC 11191 : else if (IsBackgroundWorker)
2732 rhaas 315 CBC 1948 : procgloballist = &ProcGlobal->bgworkerFreeProcs;
1517 michael 316 GIC 9243 : else if (am_walsender)
317 833 : procgloballist = &ProcGlobal->walsenderFreeProcs;
318 : else
2732 rhaas 319 8410 : procgloballist = &ProcGlobal->freeProcs;
320 :
321 : /*
322 : * Try to get a proc struct from the appropriate free list. If this
323 : * fails, we must be out of PGPROC structures (not to mention semaphores).
6389 tgl 324 ECB : *
325 : * While we are holding the ProcStructLock, also copy the current shared
6385 bruce 326 : * estimate of spins_per_delay to local storage.
327 : */
7862 tgl 328 CBC 11513 : SpinLockAcquire(ProcStructLock);
329 :
2732 rhaas 330 11513 : set_spins_per_delay(ProcGlobal->spins_per_delay);
6389 tgl 331 ECB :
81 andres 332 GNC 11513 : if (!dlist_is_empty(procgloballist))
333 : {
334 11511 : MyProc = (PGPROC*) dlist_pop_head_node(procgloballist);
7862 tgl 335 GIC 11511 : SpinLockRelease(ProcStructLock);
336 : }
337 : else
338 : {
9345 bruce 339 ECB : /*
6385 340 : * If we reach here, all the PGPROCs are in use. This is one of the
341 : * possible places to detect "too many backends", so give the standard
342 : * error message. XXX do we need to give a different failure message
343 : * in the autovacuum case?
344 : */
7862 tgl 345 GBC 2 : SpinLockRelease(ProcStructLock);
1517 michael 346 GIC 2 : if (am_walsender)
347 2 : ereport(FATAL,
348 : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
349 : errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
350 : max_wal_senders)));
7199 tgl 351 UIC 0 : ereport(FATAL,
352 : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
353 : errmsg("sorry, too many clients already")));
9770 scrappy 354 ECB : }
355 :
356 : /*
357 : * Cross-check that the PGPROC is of the type we expect; if this were not
358 : * the case, it would get returned to the wrong list.
359 : */
2812 rhaas 360 GIC 11511 : Assert(MyProc->procgloballist == procgloballist);
361 :
5087 tgl 362 ECB : /*
363 : * Now that we have a PGPROC, mark ourselves as an active postmaster
364 : * child; this is so that the postmaster can detect it if we exit without
365 : * cleaning up. (XXX autovac launcher currently doesn't participate in
366 : * this; it probably should.)
367 : */
4969 tgl 368 GIC 11511 : if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
4612 tgl 369 CBC 10586 : MarkPostmasterChildActive();
5087 tgl 370 ECB :
9770 scrappy 371 : /*
3955 bruce 372 : * Initialize all fields of MyProc, except for those previously
373 : * initialized by InitProcGlobal.
8120 tgl 374 : */
81 andres 375 GNC 11511 : dlist_node_init(&MyProc->links);
1026 peter 376 CBC 11511 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
5695 tgl 377 GIC 11511 : MyProc->lxid = InvalidLocalTransactionId;
3783 simon 378 CBC 11511 : MyProc->fpVXIDLock = false;
379 11511 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
968 andres 380 11511 : MyProc->xid = InvalidTransactionId;
969 381 11511 : MyProc->xmin = InvalidTransactionId;
7862 tgl 382 11511 : MyProc->pid = MyProcPid;
5695 tgl 383 ECB : /* backendId, databaseId and roleId will be filled in later */
5695 tgl 384 CBC 11511 : MyProc->backendId = InvalidBackendId;
6304 tgl 385 GIC 11511 : MyProc->databaseId = InvalidOid;
6461 tgl 386 CBC 11511 : MyProc->roleId = InvalidOid;
1700 michael 387 11511 : MyProc->tempNamespaceId = InvalidOid;
2258 andrew 388 11511 : MyProc->isBackgroundWorker = IsBackgroundWorker;
366 rhaas 389 11511 : MyProc->delayChkptFlags = 0;
874 alvherre 390 11511 : MyProc->statusFlags = 0;
4969 tgl 391 ECB : /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
5646 alvherre 392 CBC 11511 : if (IsAutoVacuumWorkerProcess())
874 alvherre 393 GIC 16 : MyProc->statusFlags |= PROC_IS_AUTOVACUUM;
140 andres 394 GNC 11511 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
4087 heikki.linnakangas 395 GIC 11511 : MyProc->lwWaitMode = 0;
8112 tgl 396 11511 : MyProc->waitLock = NULL;
6799 397 11511 : MyProc->waitProcLock = NULL;
776 fujii 398 CBC 11511 : pg_atomic_write_u64(&MyProc->waitStart, 0);
4177 rhaas 399 ECB : #ifdef USE_ASSERT_CHECKING
400 : {
401 : int i;
402 :
403 : /* Last process should have released all locks. */
4177 rhaas 404 GIC 195687 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
81 andres 405 GNC 184176 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
4177 rhaas 406 ECB : }
407 : #endif
4831 simon 408 GIC 11511 : MyProc->recoveryConflictPending = false;
409 :
4260 tgl 410 ECB : /* Initialize fields for sync rep */
3941 heikki.linnakangas 411 CBC 11511 : MyProc->waitLSN = 0;
4417 simon 412 11511 : MyProc->syncRepState = SYNC_REP_NOT_WAITING;
81 andres 413 GNC 11511 : dlist_node_init(&MyProc->syncRepLinks);
414 :
2803 rhaas 415 ECB : /* Initialize fields for group XID clearing. */
2614 rhaas 416 CBC 11511 : MyProc->procArrayGroupMember = false;
2614 rhaas 417 GIC 11511 : MyProc->procArrayGroupMemberXid = InvalidTransactionId;
1608 akapila 418 11511 : Assert(pg_atomic_read_u32(&MyProc->procArrayGroupNext) == INVALID_PGPROCNO);
2803 rhaas 419 ECB :
420 : /* Check that group locking fields are in a proper initial state. */
2618 rhaas 421 GIC 11511 : Assert(MyProc->lockGroupLeader == NULL);
2618 rhaas 422 CBC 11511 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
2618 rhaas 423 ECB :
2586 424 : /* Initialize wait event information. */
2586 rhaas 425 CBC 11511 : MyProc->wait_event_info = 0;
2586 rhaas 426 ECB :
2046 427 : /* Initialize fields for group transaction status update. */
2046 rhaas 428 GIC 11511 : MyProc->clogGroupMember = false;
429 11511 : MyProc->clogGroupMemberXid = InvalidTransactionId;
430 11511 : MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
431 11511 : MyProc->clogGroupMemberPage = -1;
432 11511 : MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
1608 akapila 433 11511 : Assert(pg_atomic_read_u32(&MyProc->clogGroupNext) == INVALID_PGPROCNO);
2046 rhaas 434 ECB :
4260 tgl 435 : /*
436 : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
437 : * on it. That allows us to repoint the process latch, which so far
3007 andres 438 : * points to the process-local one, to the shared one.
439 : */
4260 tgl 440 GIC 11511 : OwnLatch(&MyProc->procLatch);
3007 andres 441 11511 : SwitchToSharedLatch();
442 :
443 : /* now that we have a proc, report wait events to shared memory */
736 444 11511 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
736 andres 445 ECB :
446 : /*
447 : * We might be reusing a semaphore that belonged to a failed process. So
448 : * be careful and reinitialize its value here. (This is not strictly
449 : * necessary anymore, but seems like a good idea for cleanliness.)
6534 tgl 450 : */
2309 tgl 451 GIC 11511 : PGSemaphoreReset(MyProc->sem);
452 :
453 : /*
454 : * Arrange to clean up at backend exit.
455 : */
8120 tgl 456 CBC 11511 : on_shmem_exit(ProcKill, 0);
8109 tgl 457 ECB :
458 : /*
459 : * Now that we have a PGPROC, we could try to acquire locks, so initialize
460 : * local state needed for LWLocks, and the deadlock checker.
461 : */
3205 heikki.linnakangas 462 GIC 11511 : InitLWLockAccess();
8109 tgl 463 11511 : InitDeadLockChecking();
8120 464 11511 : }
465 :
466 : /*
467 : * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
6304 tgl 468 ECB : *
469 : * This is separate from InitProcess because we can't acquire LWLocks until
4988 470 : * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
471 : * work until after we've done CreateSharedMemoryAndSemaphores.
472 : */
473 : void
6304 tgl 474 GIC 11502 : InitProcessPhase2(void)
6304 tgl 475 ECB : {
6304 tgl 476 GIC 11502 : Assert(MyProc != NULL);
477 :
478 : /*
479 : * Add our PGPROC to the PGPROC array in shared memory.
6304 tgl 480 ECB : */
6304 tgl 481 CBC 11502 : ProcArrayAdd(MyProc);
482 :
483 : /*
484 : * Arrange to clean that up at backend exit.
485 : */
6304 tgl 486 GIC 11502 : on_shmem_exit(RemoveProcFromArray, 0);
487 11502 : }
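A sketch of the backend startup ordering implied by the comments above; the ellipsis stands for unrelated initialization, and the exact call sites live outside this file:

/*
 *   InitProcess();          -- grab a PGPROC + semaphore, own its latch
 *   ...                     -- other setup; LWLocks are now usable
 *   InitProcessPhase2();    -- add MyProc to the shared ProcArray
 *
 * At backend exit the registered callbacks run in reverse order of
 * registration, so RemoveProcFromArray() runs before ProcKill().
 */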
488 :
489 : /*
490 : * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
491 : *
492 : * This is called by bgwriter and similar processes so that they will have a
493 : * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
494 : * and sema that are assigned come from the extra ones created during
495 : * InitProcGlobal.
496 : *
497 : * Auxiliary processes are presently not expected to wait for real (lockmgr)
498 : * locks, so we need not set up the deadlock checker. They are never added
499 : * to the ProcArray or the sinval messaging mechanism, either. They also
500 : * don't get a VXID assigned, since this is only useful when we actually
501 : * hold lockmgr locks.
502 : *
4859 simon 503 ECB : * The startup process, however, does use locks, but it never waits for them
504 : * in the normal backend sense. It also takes part in sinval messaging as a
505 : * sendOnly process, so it never reads messages from the sinval queue. The
506 : * startup process therefore does have a VXID and does show up in pg_locks.
507 : */
508 : void
5877 alvherre 509 GIC 1784 : InitAuxiliaryProcess(void)
510 : {
511 : PGPROC *auxproc;
6304 tgl 512 ECB : int proctype;
7081 JanWieck 513 EUB :
514 : /*
6304 tgl 515 ECB : * ProcGlobal should be set up already (if we are a backend, we inherit
6304 tgl 516 EUB : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
517 : */
5877 alvherre 518 GIC 1784 : if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
7199 tgl 519 UIC 0 : elog(PANIC, "proc header uninitialized");
520 :
7862 tgl 521 GIC 1784 : if (MyProc != NULL)
7199 tgl 522 UIC 0 : elog(ERROR, "you already exist");
523 :
524 : /*
6304 tgl 525 ECB : * We use the ProcStructLock to protect assignment and releasing of
526 : * AuxiliaryProcs entries.
6389 527 : *
528 : * While we are holding the ProcStructLock, also copy the current shared
529 : * estimate of spins_per_delay to local storage.
530 : */
6389 tgl 531 GIC 1784 : SpinLockAcquire(ProcStructLock);
6389 tgl 532 ECB :
6389 tgl 533 GIC 1784 : set_spins_per_delay(ProcGlobal->spins_per_delay);
6389 tgl 534 ECB :
7862 535 : /*
5877 alvherre 536 : * Find a free auxproc ... *big* trouble if there isn't one ...
537 : */
5877 alvherre 538 CBC 4108 : for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
539 : {
5877 alvherre 540 GBC 4108 : auxproc = &AuxiliaryProcs[proctype];
541 4108 : if (auxproc->pid == 0)
6304 tgl 542 GIC 1784 : break;
543 : }
5877 alvherre 544 1784 : if (proctype >= NUM_AUXILIARY_PROCS)
545 : {
6389 tgl 546 LBC 0 : SpinLockRelease(ProcStructLock);
5877 alvherre 547 UIC 0 : elog(FATAL, "all AuxiliaryProcs are in use");
6389 tgl 548 ECB : }
549 :
5877 alvherre 550 : /* Mark auxiliary proc as in use by me */
551 : /* use volatile pointer to prevent code rearrangement */
5877 alvherre 552 GIC 1784 : ((volatile PGPROC *) auxproc)->pid = MyProcPid;
553 :
554 1784 : MyProc = auxproc;
555 :
6389 tgl 556 CBC 1784 : SpinLockRelease(ProcStructLock);
6389 tgl 557 ECB :
7862 558 : /*
3955 bruce 559 : * Initialize all fields of MyProc, except for those previously
560 : * initialized by InitProcGlobal.
7862 tgl 561 : */
81 andres 562 GNC 1784 : dlist_node_init(&MyProc->links);
1026 peter 563 CBC 1784 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
5695 tgl 564 1784 : MyProc->lxid = InvalidLocalTransactionId;
3783 simon 565 1784 : MyProc->fpVXIDLock = false;
566 1784 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
968 andres 567 1784 : MyProc->xid = InvalidTransactionId;
969 568 1784 : MyProc->xmin = InvalidTransactionId;
5695 tgl 569 1784 : MyProc->backendId = InvalidBackendId;
6304 570 1784 : MyProc->databaseId = InvalidOid;
6461 571 1784 : MyProc->roleId = InvalidOid;
1700 michael 572 1784 : MyProc->tempNamespaceId = InvalidOid;
2258 andrew 573 1784 : MyProc->isBackgroundWorker = IsBackgroundWorker;
366 rhaas 574 1784 : MyProc->delayChkptFlags = 0;
874 alvherre 575 GIC 1784 : MyProc->statusFlags = 0;
140 andres 576 GNC 1784 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
4087 heikki.linnakangas 577 GIC 1784 : MyProc->lwWaitMode = 0;
7862 tgl 578 1784 : MyProc->waitLock = NULL;
6799 579 1784 : MyProc->waitProcLock = NULL;
776 fujii 580 CBC 1784 : pg_atomic_write_u64(&MyProc->waitStart, 0);
4177 rhaas 581 ECB : #ifdef USE_ASSERT_CHECKING
582 : {
583 : int i;
584 :
585 : /* Last process should have released all locks. */
4177 rhaas 586 GIC 30328 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
81 andres 587 GNC 28544 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
588 : }
589 : #endif
7862 tgl 590 ECB :
4260 591 : /*
592 : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
593 : * on it. That allows us to repoint the process latch, which so far
3007 andres 594 : * points to the process-local one, to the shared one.
595 : */
4260 tgl 596 GIC 1784 : OwnLatch(&MyProc->procLatch);
3007 andres 597 CBC 1784 : SwitchToSharedLatch();
4260 tgl 598 ECB :
599 : /* now that we have a proc, report wait events to shared memory */
736 andres 600 GIC 1784 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
601 :
602 : /* Check that group locking fields are in a proper initial state. */
2618 rhaas 603 1784 : Assert(MyProc->lockGroupLeader == NULL);
604 1784 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
2618 rhaas 605 ECB :
606 : /*
607 : * We might be reusing a semaphore that belonged to a failed process. So
608 : * be careful and reinitialize its value here. (This is not strictly
609 : * necessary anymore, but seems like a good idea for cleanliness.)
7862 tgl 610 : */
2309 tgl 611 CBC 1784 : PGSemaphoreReset(MyProc->sem);
612 :
613 : /*
614 : * Arrange to clean up at process exit.
615 : */
5877 alvherre 616 GIC 1784 : on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
9770 scrappy 617 1784 : }
618 :
619 : /*
620 : * Used from bufmgr to share the value of the buffer that Startup waits on,
4824 simon 621 ECB : * or to reset the value to "not waiting" (-1). This allows processing
622 : * of recovery conflicts for buffer pins. The value is set before backends
623 : * look at it, so locking is not required, especially since the assignment
624 : * is an atomic integer store.
625 : */
626 : void
4824 simon 627 CBC 22 : SetStartupBufferPinWaitBufId(int bufid)
628 : {
629 : /* use volatile pointer to prevent code rearrangement */
4824 simon 630 GIC 22 : volatile PROC_HDR *procglobal = ProcGlobal;
631 :
632 22 : procglobal->startupBufferPinWaitBufId = bufid;
4824 simon 633 CBC 22 : }
634 :
635 : /*
4824 simon 636 ECB : * Used by backends when they receive a request to check for buffer pin waits.
637 : */
638 : int
4824 simon 639 GIC 5 : GetStartupBufferPinWaitBufId(void)
640 : {
641 : /* use volatile pointer to prevent code rearrangement */
642 5 : volatile PROC_HDR *procglobal = ProcGlobal;
643 :
4268 tgl 644 5 : return procglobal->startupBufferPinWaitBufId;
645 : }
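A minimal sketch of the set/read handshake these two functions support, with placeholder variables (buffer_id, waiting_buf) and the surrounding signalling elided:

	/* Startup process side: advertise the buffer being waited on. */
	SetStartupBufferPinWaitBufId(buffer_id);
	/* ... ask backends to check for buffer pin waits, then wait ... */
	SetStartupBufferPinWaitBufId(-1);	/* back to "not waiting" */

	/* Backend side, when it receives the check request: */
	int			waiting_buf = GetStartupBufferPinWaitBufId();

	if (waiting_buf >= 0)
	{
		/* drop our pin on that buffer, or ignore if we hold none */
	}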
646 :
647 : /*
648 : * Check whether there are at least N free PGPROC objects. If false is
649 : * returned, *nfree will be set to the number of free PGPROC objects.
650 : * Otherwise, *nfree will be set to n.
6505 tgl 651 ECB : *
652 : * Note: this is designed on the assumption that N will generally be small.
653 : */
654 : bool
79 rhaas 655 GNC 204 : HaveNFreeProcs(int n, int *nfree)
6505 tgl 656 ECB : {
657 : dlist_iter iter;
658 :
79 rhaas 659 GNC 204 : Assert(n > 0);
660 204 : Assert(nfree);
79 rhaas 661 ECB :
6505 tgl 662 GIC 204 : SpinLockAcquire(ProcStructLock);
6505 tgl 663 ECB :
79 rhaas 664 GNC 204 : *nfree = 0;
81 andres 665 612 : dlist_foreach(iter, &ProcGlobal->freeProcs)
6505 tgl 666 ECB : {
79 rhaas 667 GNC 612 : (*nfree)++;
668 612 : if (*nfree == n)
81 andres 669 204 : break;
670 : }
6505 tgl 671 ECB :
6505 tgl 672 GIC 204 : SpinLockRelease(ProcStructLock);
6505 tgl 673 ECB :
79 rhaas 674 GNC 204 : return (*nfree == n);
675 : }
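A hedged usage example for HaveNFreeProcs(); the reserved_slots threshold and the message text are hypothetical, and only the function's contract (returns false and reports the actual count via *nfree) comes from the code above:

	int			nfree;

	if (!HaveNFreeProcs(reserved_slots, &nfree))
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("remaining connection slots are reserved (%d free)",
						nfree)));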
676 :
677 : /*
678 : * Check if the current process is awaiting a lock.
679 : */
4803 simon 680 ECB : bool
4803 simon 681 GIC 8 : IsWaitingForLock(void)
4803 simon 682 ECB : {
4803 simon 683 CBC 8 : if (lockAwaited == NULL)
4803 simon 684 GIC 6 : return false;
4803 simon 685 ECB :
4803 simon 686 GIC 2 : return true;
687 : }
688 :
689 : /*
690 : * Cancel any pending wait for lock, when aborting a transaction, and revert
691 : * any strong lock count acquisition for a lock being acquired.
692 : *
693 : * (Normally, this would only happen if we accept a cancel/die
694 : * interrupt while waiting; but an ereport(ERROR) before or during the lock
695 : * wait is within the realm of possibility, too.)
696 : */
5552 tgl 697 ECB : void
4008 rhaas 698 GIC 511245 : LockErrorCleanup(void)
699 : {
700 : LWLock *partitionLock;
701 : DisableTimeoutParams timeouts[2];
6328 tgl 702 ECB :
2988 heikki.linnakangas 703 GIC 511245 : HOLD_INTERRUPTS();
2988 heikki.linnakangas 704 ECB :
4008 rhaas 705 GIC 511245 : AbortStrongLockAcquire();
706 :
8120 tgl 707 ECB : /* Nothing to do if we weren't waiting for a lock */
6328 tgl 708 GIC 511245 : if (lockAwaited == NULL)
2988 heikki.linnakangas 709 ECB : {
2988 heikki.linnakangas 710 CBC 511203 : RESUME_INTERRUPTS();
5552 tgl 711 GIC 511203 : return;
712 : }
713 :
714 : /*
715 : * Turn off the deadlock and lock timeout timers, if they are still
716 : * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
717 : * indicator flag, since this function is executed before
718 : * ProcessInterrupts when responding to SIGINT; else we'd lose the
719 : * knowledge that the SIGINT came from a lock timeout and not an external
720 : * source.
3676 tgl 721 ECB : */
3676 tgl 722 CBC 42 : timeouts[0].id = DEADLOCK_TIMEOUT;
723 42 : timeouts[0].keep_indicator = false;
724 42 : timeouts[1].id = LOCK_TIMEOUT;
725 42 : timeouts[1].keep_indicator = true;
3676 tgl 726 GIC 42 : disable_timeouts(timeouts, 2);
727 :
8120 tgl 728 ECB : /* Unlink myself from the wait queue, if on it (might not be anymore!) */
6104 tgl 729 CBC 42 : partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
6328 tgl 730 GIC 42 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
6840 tgl 731 ECB :
81 andres 732 GNC 42 : if (!dlist_node_is_detached(&MyProc->links))
733 : {
6840 tgl 734 ECB : /* We could not have been granted the lock yet */
6104 tgl 735 GIC 40 : RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
736 : }
737 : else
738 : {
739 : /*
740 : * Somebody kicked us off the lock queue already. Perhaps they
741 : * granted us the lock, or perhaps they detected a deadlock. If they
742 : * did grant us the lock, we'd better remember it in our local lock
743 : * table.
6840 tgl 744 ECB : */
1026 peter 745 CBC 2 : if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
6799 tgl 746 GIC 2 : GrantAwaitedLock();
747 : }
6840 tgl 748 ECB :
6328 tgl 749 GIC 42 : lockAwaited = NULL;
6840 tgl 750 ECB :
6328 tgl 751 GIC 42 : LWLockRelease(partitionLock);
8448 inoue 752 ECB :
2988 heikki.linnakangas 753 GIC 42 : RESUME_INTERRUPTS();
754 : }
755 :
756 :
757 : /*
758 : * ProcReleaseLocks() -- release locks associated with current transaction
759 : * at main transaction commit or abort
760 : *
761 : * At main transaction commit, we release standard locks except session locks.
762 : * At main transaction abort, we release all locks including session locks.
763 : *
764 : * Advisory locks are released only if they are transaction-level;
765 : * session-level holds remain, whether this is a commit or not.
766 : *
767 : * At subtransaction commit, we don't release any locks (so this func is not
768 : * needed at all); we will defer the releasing to the parent transaction.
769 : * At subtransaction abort, we release all locks held by the subtransaction;
770 : * this is implemented by retail releasing of the locks under control of
771 : * the ResourceOwner mechanism.
772 : */
9770 scrappy 773 ECB : void
6840 tgl 774 GIC 486172 : ProcReleaseLocks(bool isCommit)
9770 scrappy 775 ECB : {
9345 bruce 776 GBC 486172 : if (!MyProc)
9345 bruce 777 UIC 0 : return;
8120 tgl 778 ECB : /* If waiting, get off wait queue (should only be needed after error) */
4008 rhaas 779 GIC 486172 : LockErrorCleanup();
3992 tgl 780 ECB : /* Release standard locks, including session-level if aborting */
6799 tgl 781 GIC 486172 : LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
3992 tgl 782 ECB : /* Release transaction-level advisory locks */
4433 itagaki.takahiro 783 GIC 486172 : LockReleaseAll(USER_LOCKMETHOD, false);
784 : }
785 :
786 :
787 : /*
788 : * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
789 : */
6304 tgl 790 ECB : static void
6304 tgl 791 GIC 11502 : RemoveProcFromArray(int code, Datum arg)
6304 tgl 792 ECB : {
6304 tgl 793 CBC 11502 : Assert(MyProc != NULL);
5692 794 11502 : ProcArrayRemove(MyProc, InvalidTransactionId);
6304 tgl 795 GIC 11502 : }
796 :
797 : /*
798 : * ProcKill() -- Destroy the per-proc data structure for
799 : * this process. Release any of its held LW locks.
800 : */
9770 scrappy 801 ECB : static void
7058 peter_e 802 GIC 11511 : ProcKill(int code, Datum arg)
803 : {
804 : PGPROC *proc;
805 : dlist_head *procgloballist;
7772 tgl 806 ECB :
7884 tgl 807 GIC 11511 : Assert(MyProc != NULL);
808 :
4260 tgl 809 ECB : /* Make sure we're out of the sync rep lists */
4260 tgl 810 GIC 11511 : SyncRepCleanupAtProcExit();
811 :
812 : #ifdef USE_ASSERT_CHECKING
813 : {
814 : int i;
815 :
4177 rhaas 816 ECB : /* Last process should have released all locks. */
4177 rhaas 817 CBC 195687 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
81 andres 818 GNC 184176 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
819 : }
820 : #endif
821 :
822 : /*
823 : * Release any LW locks I am holding. There really shouldn't be any, but
824 : * it's cheap to check again before we cut the knees off the LWLock
825 : * facility by releasing our PGPROC ...
7501 tgl 826 ECB : */
6453 tgl 827 GIC 11511 : LWLockReleaseAll();
828 :
2329 rhaas 829 ECB : /* Cancel any pending condition variable sleep, too */
2329 rhaas 830 GIC 11511 : ConditionVariableCancelSleep();
831 :
832 : /*
833 : * Detach from any lock group of which we are a member. If the leader
834 : * exits before all other group members, its PGPROC will remain allocated
835 : * until the last group process exits; that process must return the
836 : * leader's PGPROC to the appropriate list.
2618 rhaas 837 ECB : */
2618 rhaas 838 GIC 11511 : if (MyProc->lockGroupLeader != NULL)
2618 rhaas 839 ECB : {
2618 rhaas 840 CBC 1353 : PGPROC *leader = MyProc->lockGroupLeader;
2618 rhaas 841 GIC 1353 : LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
2618 rhaas 842 ECB :
2618 rhaas 843 CBC 1353 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
844 1353 : Assert(!dlist_is_empty(&leader->lockGroupMembers));
845 1353 : dlist_delete(&MyProc->lockGroupLink);
2618 rhaas 846 GIC 1353 : if (dlist_is_empty(&leader->lockGroupMembers))
2618 rhaas 847 ECB : {
2618 rhaas 848 CBC 55 : leader->lockGroupLeader = NULL;
2618 rhaas 849 GIC 55 : if (leader != MyProc)
2618 rhaas 850 EUB : {
2618 rhaas 851 UIC 0 : procgloballist = leader->procgloballist;
852 :
2618 rhaas 853 EUB : /* Leader exited first; return its PGPROC. */
2618 rhaas 854 UBC 0 : SpinLockAcquire(ProcStructLock);
81 andres 855 UNC 0 : dlist_push_head(procgloballist, &leader->links);
2618 rhaas 856 UIC 0 : SpinLockRelease(ProcStructLock);
2618 rhaas 857 ECB : }
858 : }
2618 rhaas 859 CBC 1298 : else if (leader != MyProc)
2618 rhaas 860 GIC 1298 : MyProc->lockGroupLeader = NULL;
861 1353 : LWLockRelease(leader_lwlock);
862 : }
863 :
864 : /*
865 : * Reset MyLatch to the process local one. This is so that signal
866 : * handlers et al can continue using the latch after the shared latch
867 : * isn't ours anymore.
868 : *
869 : * Similarly, stop reporting wait events to MyProc->wait_event_info.
870 : *
736 andres 871 ECB : * After that clear MyProc and disown the shared latch.
3355 rhaas 872 : */
3007 andres 873 GIC 11511 : SwitchBackToLocalLatch();
736 andres 874 CBC 11511 : pgstat_reset_wait_event_storage();
736 andres 875 ECB :
3355 rhaas 876 CBC 11511 : proc = MyProc;
3355 rhaas 877 GIC 11511 : MyProc = NULL;
3355 rhaas 878 CBC 11511 : DisownLatch(&proc->procLatch);
4260 tgl 879 ECB :
2812 rhaas 880 GIC 11511 : procgloballist = proc->procgloballist;
7862 tgl 881 11511 : SpinLockAcquire(ProcStructLock);
882 :
883 : /*
884 : * If we're still a member of a locking group, that means we're a leader
885 : * which has somehow exited before its children. The last remaining child
2618 rhaas 886 ECB : * will release our PGPROC. Otherwise, release it now.
887 : */
2618 rhaas 888 GIC 11511 : if (proc->lockGroupLeader == NULL)
2618 rhaas 889 ECB : {
890 : /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
2618 rhaas 891 GIC 11511 : Assert(dlist_is_empty(&proc->lockGroupMembers));
2618 rhaas 892 ECB :
893 : /* Return PGPROC structure (and semaphore) to appropriate freelist */
81 andres 894 GNC 11511 : dlist_push_tail(procgloballist, &proc->links);
2618 rhaas 895 ECB : }
896 :
6389 tgl 897 : /* Update shared estimate of spins_per_delay */
2732 rhaas 898 GIC 11511 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
899 :
7862 tgl 900 11511 : SpinLockRelease(ProcStructLock);
901 :
902 : /*
903 : * This process is no longer present in shared memory in any meaningful
4790 bruce 904 ECB : * way, so tell the postmaster we've cleaned up acceptably well. (XXX
905 : * autovac launcher should be included here someday)
906 : */
4969 tgl 907 GIC 11511 : if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
5087 tgl 908 CBC 10586 : MarkPostmasterChildInactive();
5087 tgl 909 ECB :
5837 alvherre 910 : /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
5837 alvherre 911 GIC 11511 : if (AutovacuumLauncherPid != 0)
4969 tgl 912 16 : kill(AutovacuumLauncherPid, SIGUSR2);
7862 913 11511 : }
914 :
915 : /*
916 : * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
917 : * processes (bgwriter, etc). The PGPROC and sema are not released, only
5877 alvherre 918 ECB : * marked as not-in-use.
919 : */
7862 tgl 920 : static void
5877 alvherre 921 GIC 1784 : AuxiliaryProcKill(int code, Datum arg)
922 : {
6797 bruce 923 1784 : int proctype = DatumGetInt32(arg);
4036 peter_e 924 ECB : PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
925 : PGPROC *proc;
7081 JanWieck 926 :
5877 alvherre 927 GIC 1784 : Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
7081 JanWieck 928 ECB :
5877 alvherre 929 GIC 1784 : auxproc = &AuxiliaryProcs[proctype];
930 :
5877 alvherre 931 CBC 1784 : Assert(MyProc == auxproc);
932 :
933 : /* Release any LW locks I am holding (see notes above) */
7862 tgl 934 1784 : LWLockReleaseAll();
935 :
936 : /* Cancel any pending condition variable sleep, too */
2329 rhaas 937 1784 : ConditionVariableCancelSleep();
2329 rhaas 938 ECB :
939 : /* look at the equivalent ProcKill() code for comments */
3007 andres 940 CBC 1784 : SwitchBackToLocalLatch();
736 941 1784 : pgstat_reset_wait_event_storage();
736 andres 942 ECB :
3355 rhaas 943 GIC 1784 : proc = MyProc;
3355 rhaas 944 CBC 1784 : MyProc = NULL;
3355 rhaas 945 GIC 1784 : DisownLatch(&proc->procLatch);
946 :
6389 tgl 947 CBC 1784 : SpinLockAcquire(ProcStructLock);
948 :
949 : /* Mark auxiliary proc no longer in use */
3355 rhaas 950 1784 : proc->pid = 0;
951 :
6389 tgl 952 ECB : /* Update shared estimate of spins_per_delay */
6389 tgl 953 CBC 1784 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
954 :
6389 tgl 955 GIC 1784 : SpinLockRelease(ProcStructLock);
9770 scrappy 956 1784 : }
957 :
958 : /*
959 : * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
960 : * given its PID
961 : *
2205 rhaas 962 ECB : * Returns NULL if not found.
963 : */
964 : PGPROC *
2205 rhaas 965 GIC 1700 : AuxiliaryPidGetProc(int pid)
966 : {
2205 rhaas 967 CBC 1700 : PGPROC *result = NULL;
2205 rhaas 968 EUB : int index;
969 :
2205 rhaas 970 CBC 1700 : if (pid == 0) /* never match dummy PGPROCs */
2205 rhaas 971 UIC 0 : return NULL;
2205 rhaas 972 ECB :
2205 rhaas 973 GIC 3458 : for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
2205 rhaas 974 ECB : {
2205 rhaas 975 GIC 3458 : PGPROC *proc = &AuxiliaryProcs[index];
2205 rhaas 976 ECB :
2205 rhaas 977 CBC 3458 : if (proc->pid == pid)
978 : {
2205 rhaas 979 GIC 1700 : result = proc;
2205 rhaas 980 CBC 1700 : break;
981 : }
982 : }
2205 rhaas 983 GIC 1700 : return result;
984 : }
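Before ProcSleep() itself, a sketch of the caller-side protocol spelled out in its header comment below: the lock manager holds the lock table's partition lock, has set MyProc->heldLocks, and acts on the returned ProcWaitStatus. The variable names here are simplified placeholders for lock.c's real state:

	/* Illustrative caller pattern only (simplified). */
	ProcWaitStatus status;

	MyProc->heldLocks = already_held_modes;		/* modes we already hold */
	/* the lock table's partition lock is already held exclusively here */
	status = ProcSleep(locallock, lockMethodTable);
	/* the partition lock is still held here, per ProcSleep's contract */
	if (status == PROC_WAIT_STATUS_ERROR)
	{
		/* deadlock was detected; the caller reports the error */
	}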
985 :
986 :
987 : /*
988 : * ProcSleep -- put a process to sleep on the specified lock
989 : *
8109 tgl 990 ECB : * Caller must have set MyProc->heldLocks to reflect locks already held
991 : * on the lockable object by this process (under all XIDs).
992 : *
993 : * The lock table's partition lock must be held at entry, and will be held
8120 994 : * at exit.
995 : *
996 : * Result: PROC_WAIT_STATUS_OK if we acquired the lock, PROC_WAIT_STATUS_ERROR if not (deadlock).
997 : *
9770 scrappy 998 : * ASSUME: that no one will fiddle with the queue until after
999 : * we release the partition lock.
1000 : *
1001 : * NOTES: The process queue is now a priority queue for locking.
1002 : */
1003 : ProcWaitStatus
6328 tgl 1004 GIC 1030 : ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
1005 : {
1006 1030 : LOCKMODE lockmode = locallock->tag.mode;
1007 1030 : LOCK *lock = locallock->lock;
1008 1030 : PROCLOCK *proclock = locallock->proclock;
6104 1009 1030 : uint32 hashcode = locallock->hashcode;
3359 rhaas 1010 1030 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
81 andres 1011 GNC 1030 : dclist_head *waitQueue = &lock->waitProcs;
1012 1030 : PGPROC *insert_before = NULL;
7069 bruce 1013 GIC 1030 : LOCKMASK myHeldLocks = MyProc->heldLocks;
821 fujii 1014 1030 : TimestampTz standbyWaitStart = 0;
7887 tgl 1015 1030 : bool early_deadlock = false;
5624 bruce 1016 1030 : bool allow_autovacuum_cancel = true;
821 fujii 1017 1030 : bool logged_recovery_conflict = false;
1018 : ProcWaitStatus myWaitStatus;
2618 rhaas 1019 1030 : PGPROC *leader = MyProc->lockGroupLeader;
1020 :
2618 rhaas 1021 ECB : /*
1022 : * If group locking is in use, locks held by members of my locking group
1115 akapila 1023 : * need to be included in myHeldLocks. This is not required for relation
1024 : * extension or page locks which conflict among group members. However,
1025 : * including them in myHeldLocks will give group members priority to
1114 1026 : * acquire those locks ahead of other backends that are also trying to
1027 : * acquire them. OTOH, we could avoid giving priority to group
1028 : * members for those kinds of locks, but there doesn't appear to be a
1029 : * clear advantage in doing so.
1030 : */
2618 rhaas 1031 GIC 1030 : if (leader != NULL)
1032 : {
1033 : dlist_iter iter;
2618 rhaas 1034 ECB :
81 andres 1035 GNC 30 : dlist_foreach(iter, &lock->procLocks)
2618 rhaas 1036 ECB : {
1037 : PROCLOCK *otherproclock;
1038 :
81 andres 1039 GNC 23 : otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
1040 :
2618 rhaas 1041 GIC 23 : if (otherproclock->groupLeader == leader)
1042 11 : myHeldLocks |= otherproclock->holdMask;
1043 : }
1044 : }
1045 :
1046 : /*
1047 : * Determine where to add myself in the wait queue.
1048 : *
6347 bruce 1049 ECB : * Normally I should go at the end of the queue. However, if I already
1050 : * hold locks that conflict with the request of any previous waiter, put
1051 : * myself in the queue just in front of the first such waiter. This is not
1052 : * a necessary step, since deadlock detection would move me to before that
1053 : * waiter anyway; but it's relatively cheap to detect such a conflict
6385 1054 : * immediately, and avoid delaying till deadlock timeout.
8109 tgl 1055 : *
1056 : * Special case: if I find I should go in front of some waiter, check to
1057 : * see if I conflict with already-held locks or the requests before that
1058 : * waiter. If not, then just grant myself the requested lock immediately.
6385 bruce 1059 : * This is the same as the test for immediate grant in LockAcquire, except
1060 : * we are only considering the part of the wait queue before my insertion
1061 : * point.
1062 : */
81 andres 1063 GNC 1030 : if (myHeldLocks != 0 && !dclist_is_empty(waitQueue))
1064 : {
7069 bruce 1065 GBC 5 : LOCKMASK aheadRequests = 0;
1066 : dlist_iter iter;
1067 :
81 andres 1068 GNC 5 : dclist_foreach(iter, waitQueue)
1069 : {
1070 5 : PGPROC *proc = dlist_container(PGPROC, links, iter.cur);
1071 :
1072 : /*
1073 : * If we're part of the same locking group as this waiter, its
1074 : * locks neither conflict with ours nor contribute to
2604 rhaas 1075 ECB : * aheadRequests.
2618 rhaas 1076 EUB : */
2618 rhaas 1077 GIC 5 : if (leader != NULL && leader == proc->lockGroupLeader)
2618 rhaas 1078 LBC 0 : continue;
1079 :
1080 : /* Must he wait for me? */
7570 bruce 1081 CBC 5 : if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
8738 vadim4o 1082 ECB : {
8109 tgl 1083 : /* Must I wait for him ? */
7570 bruce 1084 GIC 5 : if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
8109 tgl 1085 ECB : {
1086 : /*
1087 : * Yes, so we have a deadlock. Easiest way to clean up
1088 : * correctly is to call RemoveFromWaitQueue(), but we
1089 : * can't do that until we are *on* the wait queue. So, set
1090 : * a flag to check below, and break out of loop. Also,
6385 bruce 1091 : * record deadlock info for later message.
1092 : */
7388 tgl 1093 CBC 1 : RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
7887 1094 1 : early_deadlock = true;
7887 tgl 1095 GIC 1 : break;
1096 : }
1097 : /* I must go before this waiter. Check special case. */
7570 bruce 1098 CBC 4 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1197 peter 1099 GIC 4 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1100 : proclock))
1101 : {
1102 : /* Skip the wait and just grant myself the lock. */
7355 bruce 1103 4 : GrantLock(lock, proclock, lockmode);
6799 tgl 1104 4 : GrantAwaitedLock();
1026 peter 1105 4 : return PROC_WAIT_STATUS_OK;
1106 : }
1107 :
1108 : /* Put myself into wait queue before conflicting process */
81 andres 1109 UNC 0 : insert_before = proc;
8738 vadim4o 1110 LBC 0 : break;
1111 : }
1112 : /* Nope, so advance to next waiter */
7069 bruce 1113 UIC 0 : aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1114 : }
1115 : }
1116 :
1117 : /*
1118 : * Insert self into queue, at the position determined above.
1119 : */
81 andres 1120 GNC 1026 : if (insert_before)
81 andres 1121 UNC 0 : dclist_insert_before(waitQueue, &insert_before->links, &MyProc->links);
1122 : else
81 andres 1123 GNC 1026 : dclist_push_tail(waitQueue, &MyProc->links);
1124 :
7069 bruce 1125 GIC 1026 : lock->waitMask |= LOCKBIT_ON(lockmode);
1126 :
1127 : /* Set up wait information in PGPROC object, too */
8112 tgl 1128 1026 : MyProc->waitLock = lock;
6799 1129 1026 : MyProc->waitProcLock = proclock;
8112 tgl 1130 CBC 1026 : MyProc->waitLockMode = lockmode;
1131 :
1026 peter 1132 1026 : MyProc->waitStatus = PROC_WAIT_STATUS_WAITING;
1133 :
1134 : /*
1135 : * If we detected deadlock, give up without waiting. This must agree with
1388 tmunro 1136 ECB : * CheckDeadLock's recovery code.
7887 tgl 1137 : */
7887 tgl 1138 CBC 1026 : if (early_deadlock)
7887 tgl 1139 ECB : {
6104 tgl 1140 CBC 1 : RemoveFromWaitQueue(MyProc, hashcode);
1026 peter 1141 1 : return PROC_WAIT_STATUS_ERROR;
7887 tgl 1142 ECB : }
1143 :
1144 : /* mark that we are waiting for a lock */
6328 tgl 1145 CBC 1025 : lockAwaited = locallock;
1146 :
1147 : /*
1148 : * Release the lock table's partition lock.
1149 : *
1150 : * NOTE: this may also cause us to exit critical-section state, possibly
1151 : * allowing a cancel/die interrupt to be accepted. This is OK because we
1152 : * have recorded the fact that we are waiting for a lock, and so
1153 : * LockErrorCleanup will clean up if cancel/die happens.
1154 : */
6328 tgl 1155 GIC 1025 : LWLockRelease(partitionLock);
1156 :
1157 : /*
1158 : * Also, now that we will successfully clean up after an ereport, it's
1159 : * safe to check to see if there's a buffer pin deadlock against the
1160 : * Startup process. Of course, that's only necessary if we're doing Hot
3955 bruce 1161 ECB : * Standby and are not the Startup process ourselves.
4268 tgl 1162 : */
4268 tgl 1163 GIC 1025 : if (RecoveryInProgress() && !InRecovery)
4268 tgl 1164 CBC 1 : CheckRecoveryConflictDeadlock();
1165 :
1166 : /* Reset deadlock_state before enabling the timeout handler */
5773 tgl 1167 GIC 1025 : deadlock_state = DS_NOT_YET_CHECKED;
2987 andres 1168 1025 : got_deadlock_timeout = false;
1169 :
8053 bruce 1170 ECB : /*
1171 : * Set a timer so we can wake up after a while and check for a deadlock. If a
1172 : * deadlock is detected, the handler sets MyProc->waitStatus =
1173 : * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1174 : * rather than success.
1175 : *
1176 : * By delaying the check until we've waited for a bit, we can avoid
1177 : * running the rather expensive deadlock-check code in most cases.
1178 : *
1179 : * If LockTimeout is set, also enable the timeout for that. We can save a
1180 : * few cycles by enabling both timeout sources in one call.
1181 : *
1182 : * If InHotStandby we set lock waits slightly later for clarity with other
1183 : * code.
1184 : */
2586 simon 1185 GIC 1025 : if (!InHotStandby)
1186 : {
1187 1024 : if (LockTimeout > 0)
1188 : {
2586 simon 1189 ECB : EnableTimeoutParams timeouts[2];
1190 :
2586 simon 1191 CBC 119 : timeouts[0].id = DEADLOCK_TIMEOUT;
1192 119 : timeouts[0].type = TMPARAM_AFTER;
2586 simon 1193 GIC 119 : timeouts[0].delay_ms = DeadlockTimeout;
1194 119 : timeouts[1].id = LOCK_TIMEOUT;
2586 simon 1195 CBC 119 : timeouts[1].type = TMPARAM_AFTER;
2586 simon 1196 GIC 119 : timeouts[1].delay_ms = LockTimeout;
1197 119 : enable_timeouts(timeouts, 2);
1198 : }
1199 : else
1200 905 : enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
1201 :
783 fujii 1202 ECB : /*
1203 : * Use the current time obtained for the deadlock timeout timer as
1204 : * waitStart (i.e., the time when this process started waiting for the
1205 : * lock). Since fetching the current time again would add overhead, we
1206 : * reuse the already-obtained timestamp instead.
1207 : *
1208 : * Note that waitStart is updated without holding the lock table's
1209 : * partition lock, to avoid the overhead by additional lock
1210 : * acquisition. This can cause "waitstart" in pg_locks to become NULL
1211 : * for a very short period of time after the wait started even though
1212 : * "granted" is false. This is OK in practice because we can assume
1213 : * that users are likely to look at "waitstart" when waiting for the
1214 : * lock for a long time.
1215 : */
783 fujii 1216 GIC 1024 : pg_atomic_write_u64(&MyProc->waitStart,
1217 1024 : get_timeout_start_time(DEADLOCK_TIMEOUT));
1218 : }
821 1219 1 : else if (log_recovery_conflict_waits)
1220 : {
1221 : /*
821 fujii 1222 ECB : * Set the wait start timestamp if logging is enabled and in hot
1223 : * standby.
1224 : */
821 fujii 1225 CBC 1 : standbyWaitStart = GetCurrentTimestamp();
1226 : }
1227 :
1228 : /*
1229 : * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1230 : * will not wait. But a set latch does not necessarily mean that the lock
2987 andres 1231 ECB : * is free now, as there are many other sources for latch sets than
1232 : * somebody releasing the lock.
8120 tgl 1233 : *
1234 : * We process interrupts whenever the latch has been set, so cancel/die
2987 andres 1235 : * interrupts are processed quickly. This means we must not mind losing
1236 : * control to a cancel/die interrupt here. We don't, because we have no
1237 : * shared-state-change work to do after being granted the lock (the
1238 : * grantor did it all). We do have to worry about canceling the deadlock
1239 : * timeout and updating the locallock table, but if we lose control to an
1240 : * error, LockErrorCleanup will fix that up.
1241 : */
1242 : do
1243 : {
2586 simon 1244 GIC 1650 : if (InHotStandby)
1245 : {
821 fujii 1246 3 : bool maybe_log_conflict =
1247 3 : (standbyWaitStart != 0 && !logged_recovery_conflict);
821 fujii 1248 ECB :
1249 : /* Set a timer and wait for that or for the lock to be granted */
821 fujii 1250 GIC 3 : ResolveRecoveryConflictWithLock(locallock->tag.lock,
1251 : maybe_log_conflict);
1252 :
1253 : /*
821 fujii 1254 ECB : * Emit the log message if the startup process is waiting longer
1255 : * than deadlock_timeout for recovery conflict on lock.
821 fujii 1256 EUB : */
821 fujii 1257 GIC 3 : if (maybe_log_conflict)
1258 : {
1259 1 : TimestampTz now = GetCurrentTimestamp();
1260 :
1261 1 : if (TimestampDifferenceExceeds(standbyWaitStart, now,
1262 : DeadlockTimeout))
1263 : {
1264 : VirtualTransactionId *vxids;
1265 : int cnt;
1266 :
1267 1 : vxids = GetLockConflicts(&locallock->tag.lock,
1268 : AccessExclusiveLock, &cnt);
1269 :
821 fujii 1270 EUB : /*
1271 : * Log the recovery conflict and the list of PIDs of
1272 : * backends holding the conflicting lock. Note that we do
1273 : * logging even if there are no such backends right now
1274 : * because the startup process here has already waited
1275 : * longer than deadlock_timeout.
1276 : */
821 fujii 1277 GIC 1 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1278 : standbyWaitStart, now,
816 1279 1 : cnt > 0 ? vxids : NULL, true);
821 fujii 1280 GBC 1 : logged_recovery_conflict = true;
821 fujii 1281 EUB : }
1282 : }
2586 simon 1283 : }
1284 : else
1285 : {
1598 tmunro 1286 GBC 1647 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1598 tmunro 1287 GIC 1647 : PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
2586 simon 1288 1647 : ResetLatch(MyLatch);
1289 : /* check for deadlocks first, as that's probably log-worthy */
1290 1647 : if (got_deadlock_timeout)
2586 simon 1291 EUB : {
2586 simon 1292 GBC 26 : CheckDeadLock();
1293 26 : got_deadlock_timeout = false;
2586 simon 1294 EUB : }
2586 simon 1295 GIC 1647 : CHECK_FOR_INTERRUPTS();
1296 : }
1297 :
1298 : /*
1299 : * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
697 tgl 1300 EUB : * else asynchronously. Read it just once per loop to prevent
1301 : * surprising behavior (such as missing log messages).
1302 : */
1026 peter 1303 GIC 1608 : myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1304 :
5644 alvherre 1305 EUB : /*
1306 : * If we are not deadlocked, but are waiting on an autovacuum-induced
1307 : * task, send a signal to interrupt it.
1308 : */
5644 alvherre 1309 GIC 1608 : if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
5644 alvherre 1310 EUB : {
5624 bruce 1311 UIC 0 : PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1312 : uint8 statusFlags;
1313 : uint8 lockmethod_copy;
1314 : LOCKTAG locktag_copy;
1315 :
1316 : /*
1317 : * Grab info we need, then release lock immediately. Note this
1318 : * coding means that there is a tiny chance that the process
1319 : * terminates its current transaction and starts a different one
1320 : * before we have a chance to send the signal; the worst possible
1321 : * consequence is that a for-wraparound vacuum is cancelled. But
867 alvherre 1322 EUB : * that could happen in any case unless we were to do kill() with
1323 : * the lock held, which is much more undesirable.
1324 : */
5644 alvherre 1325 UIC 0 : LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
867 1326 0 : statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1327 0 : lockmethod_copy = lock->tag.locktag_lockmethodid;
1328 0 : locktag_copy = lock->tag;
1329 0 : LWLockRelease(ProcArrayLock);
5644 alvherre 1330 EUB :
1331 : /*
1332 : * Only do it if the worker is not working to protect against Xid
1333 : * wraparound.
1334 : */
874 alvherre 1335 UIC 0 : if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1336 0 : !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
5644 alvherre 1337 ECB : {
5624 bruce 1338 UIC 0 : int pid = autovac->pid;
1339 :
1340 : /* report the case, if configured to do so */
867 tgl 1341 0 : if (message_level_is_interesting(DEBUG1))
1342 : {
1343 : StringInfoData locktagbuf;
1344 : StringInfoData logbuf; /* errdetail for server log */
1345 :
1346 0 : initStringInfo(&locktagbuf);
1347 0 : initStringInfo(&logbuf);
867 tgl 1348 LBC 0 : DescribeLockTag(&locktagbuf, &locktag_copy);
1349 0 : appendStringInfo(&logbuf,
781 peter 1350 ECB : "Process %d waits for %s on %s.",
1351 : MyProcPid,
867 tgl 1352 : GetLockmodeName(lockmethod_copy, lockmode),
1353 : locktagbuf.data);
1354 :
867 tgl 1355 UIC 0 : ereport(DEBUG1,
781 peter 1356 ECB : (errmsg_internal("sending cancel to blocking autovacuum PID %d",
697 tgl 1357 : pid),
1358 : errdetail_log("%s", logbuf.data)));
867 1359 :
867 tgl 1360 UIC 0 : pfree(locktagbuf.data);
1361 0 : pfree(logbuf.data);
867 tgl 1362 ECB : }
alvherre 1363 :
1364 : /* send the autovacuum worker Back to Old Kent Road */
5644 alvherre 1365 UIC 0 : if (kill(pid, SIGINT) < 0)
1366 : {
1367 : /*
1368 : * There's a race condition here: once we release the
1369 : * ProcArrayLock, it's possible for the autovac worker to
1370 : * close up shop and exit before we can do the kill().
1371 : * Therefore, we do not whinge about no-such-process.
1372 : * Other errors such as EPERM could conceivably happen if
1373 : * the kernel recycles the PID fast enough, but such cases
2812 tgl 1374 ECB : * seem improbable enough that it's probably best to issue
1375 : * a warning if we see some other errno.
1376 : */
2812 tgl 1377 UIC 0 : if (errno != ESRCH)
2812 tgl 1378 LBC 0 : ereport(WARNING,
2118 tgl 1379 ECB : (errmsg("could not send signal to process %d: %m",
1380 : pid)));
1381 : }
1382 : }
1383 :
1384 : /* prevent signal from being sent again more than once */
5644 alvherre 1385 LBC 0 : allow_autovacuum_cancel = false;
1386 : }
5644 alvherre 1387 ECB :
1388 : /*
5773 tgl 1389 : * If awoken after the deadlock check interrupt has run, and
1390 : * log_lock_waits is on, then report about the wait.
1391 : */
5703 tgl 1392 GIC 1608 : if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1393 : {
3314 fujii 1394 ECB : StringInfoData buf,
1395 : lock_waiters_sbuf,
1396 : lock_holders_sbuf;
1397 : const char *modename;
1398 : long secs;
5703 tgl 1399 : int usecs;
1400 : long msecs;
1401 : dlist_iter proc_iter;
1402 : PROCLOCK *curproclock;
3314 fujii 1403 CBC 23 : bool first_holder = true,
3314 fujii 1404 GIC 23 : first_waiter = true;
1405 23 : int lockHoldersNum = 0;
5703 tgl 1406 ECB :
5703 tgl 1407 CBC 23 : initStringInfo(&buf);
3314 fujii 1408 GIC 23 : initStringInfo(&lock_waiters_sbuf);
3314 fujii 1409 CBC 23 : initStringInfo(&lock_holders_sbuf);
1410 :
5703 tgl 1411 GIC 23 : DescribeLockTag(&buf, &locallock->tag.lock);
1412 23 : modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
5703 tgl 1413 ECB : lockmode);
3919 alvherre 1414 GIC 23 : TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
3919 alvherre 1415 ECB : GetCurrentTimestamp(),
5703 tgl 1416 : &secs, &usecs);
5703 tgl 1417 GIC 23 : msecs = secs * 1000 + usecs / 1000;
1418 23 : usecs = usecs % 1000;
1419 :
1420 : /*
1421 : * we loop over the lock's procLocks to gather a list of all
3314 fujii 1422 ECB : * holders and waiters. Thus we will be able to provide more
1423 : * detailed information for lock debugging purposes.
1424 : *
1425 : * lock->procLocks contains all processes which hold or wait for
1426 : * this lock.
1427 : */
1428 :
3314 fujii 1429 GIC 23 : LWLockAcquire(partitionLock, LW_SHARED);
1430 :
81 andres 1431 GNC 75 : dlist_foreach(proc_iter, &lock->procLocks)
1432 : {
1433 52 : curproclock =
1434 52 : dlist_container(PROCLOCK, lockLink, proc_iter.cur);
1435 :
1436 : /*
1437 : * we are a waiter if myProc->waitProcLock == curproclock; we
1438 : * are a holder if it is NULL or something different
3314 fujii 1439 ECB : */
186 drowley 1440 GNC 52 : if (curproclock->tag.myProc->waitProcLock == curproclock)
1441 : {
3314 fujii 1442 GIC 23 : if (first_waiter)
1443 : {
1444 13 : appendStringInfo(&lock_waiters_sbuf, "%d",
186 drowley 1445 GNC 13 : curproclock->tag.myProc->pid);
3314 fujii 1446 CBC 13 : first_waiter = false;
1447 : }
1448 : else
3314 fujii 1449 GIC 10 : appendStringInfo(&lock_waiters_sbuf, ", %d",
186 drowley 1450 GNC 10 : curproclock->tag.myProc->pid);
3314 fujii 1451 ECB : }
1452 : else
1453 : {
3314 fujii 1454 GIC 29 : if (first_holder)
1455 : {
1456 23 : appendStringInfo(&lock_holders_sbuf, "%d",
186 drowley 1457 GNC 23 : curproclock->tag.myProc->pid);
3314 fujii 1458 GIC 23 : first_holder = false;
1459 : }
1460 : else
1461 6 : appendStringInfo(&lock_holders_sbuf, ", %d",
186 drowley 1462 GNC 6 : curproclock->tag.myProc->pid);
3314 fujii 1463 EUB :
3314 fujii 1464 GIC 29 : lockHoldersNum++;
1465 : }
1466 : }
1467 :
1468 23 : LWLockRelease(partitionLock);
1469 :
5703 tgl 1470 23 : if (deadlock_state == DS_SOFT_DEADLOCK)
1471 3 : ereport(LOG,
5703 tgl 1472 ECB : (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1473 : MyProcPid, modename, buf.data, msecs, usecs),
3314 fujii 1474 : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
2118 tgl 1475 : "Processes holding the lock: %s. Wait queue: %s.",
3314 fujii 1476 : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
5703 tgl 1477 GIC 20 : else if (deadlock_state == DS_HARD_DEADLOCK)
5773 tgl 1478 ECB : {
1479 : /*
1480 : * This message is a bit redundant with the error that will be
1481 : * reported subsequently, but in some cases the error report
1482 : * might not make it to the log (eg, if it's caught by an
1483 : * exception handler), and we want to ensure all long-wait
1484 : * events get logged.
1485 : */
5703 tgl 1486 CBC 1 : ereport(LOG,
1487 : (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
3314 fujii 1488 ECB : MyProcPid, modename, buf.data, msecs, usecs),
1489 : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1490 : "Processes holding the lock: %s. Wait queue: %s.",
1491 : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
5773 tgl 1492 : }
5703 1493 :
1026 peter 1494 CBC 23 : if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
5703 tgl 1495 12 : ereport(LOG,
5703 tgl 1496 ECB : (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1497 : MyProcPid, modename, buf.data, msecs, usecs),
1498 : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
2118 1499 : "Processes holding the lock: %s. Wait queue: %s.",
1500 : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1026 peter 1501 GIC 11 : else if (myWaitStatus == PROC_WAIT_STATUS_OK)
5703 tgl 1502 10 : ereport(LOG,
1503 : (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1504 : MyProcPid, modename, buf.data, msecs, usecs)));
1505 : else
5703 tgl 1506 ECB : {
1026 peter 1507 CBC 1 : Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1508 :
1509 : /*
1510 : * Currently, the deadlock checker always kicks its own
1511 : * process, which means that we'll only see
1512 : * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1513 : * DS_HARD_DEADLOCK, and there's no need to print redundant
1514 : * messages. But for completeness and future-proofing, print
1515 : * a message if it looks like someone else kicked us off the
697 tgl 1516 ECB : * lock.
1517 : */
5703 tgl 1518 GIC 1 : if (deadlock_state != DS_HARD_DEADLOCK)
5703 tgl 1519 UIC 0 : ereport(LOG,
1520 : (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
2118 tgl 1521 ECB : MyProcPid, modename, buf.data, msecs, usecs),
1522 : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1523 : "Processes holding the lock: %s. Wait queue: %s.",
1524 : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1525 : }
5703 1526 :
1527 : /*
1528 : * At this point we might still need to wait for the lock. Reset
1529 : * state so we don't print the above messages again.
1530 : */
5703 tgl 1531 GIC 23 : deadlock_state = DS_NO_DEADLOCK;
1532 :
5703 tgl 1533 CBC 23 : pfree(buf.data);
3314 fujii 1534 GIC 23 : pfree(lock_holders_sbuf.data);
1535 23 : pfree(lock_waiters_sbuf.data);
1536 : }
1026 peter 1537 1608 : } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1538 :
1539 : /*
1540 : * Disable the timers, if they are still running. As in LockErrorCleanup,
1541 : * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1542 : * already caused QueryCancelPending to become set, we want the cancel to
1543 : * be reported as a lock timeout, not a user cancel.
1544 : */
2586 simon 1545 983 : if (!InHotStandby)
1546 : {
1547 982 : if (LockTimeout > 0)
1548 : {
1549 : DisableTimeoutParams timeouts[2];
3676 tgl 1550 ECB :
2586 simon 1551 GIC 113 : timeouts[0].id = DEADLOCK_TIMEOUT;
2586 simon 1552 CBC 113 : timeouts[0].keep_indicator = false;
2586 simon 1553 GBC 113 : timeouts[1].id = LOCK_TIMEOUT;
2586 simon 1554 GIC 113 : timeouts[1].keep_indicator = true;
2586 simon 1555 CBC 113 : disable_timeouts(timeouts, 2);
1556 : }
1557 : else
1558 869 : disable_timeout(DEADLOCK_TIMEOUT, false);
1559 : }
1560 :
816 fujii 1561 ECB : /*
1562 : * Emit the log message if a recovery conflict on a lock was resolved but the
1563 : * startup process waited longer than deadlock_timeout for it.
1564 : */
816 fujii 1565 GIC 983 : if (InHotStandby && logged_recovery_conflict)
1566 1 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
816 fujii 1567 ECB : standbyWaitStart, GetCurrentTimestamp(),
1568 : NULL, false);
1569 :
1570 : /*
1571 : * Re-acquire the lock table's partition lock. We have to do this to hold
1572 : * off cancel/die interrupts before we can mess with lockAwaited (else we
1573 : * might have a missed or duplicated locallock update).
1574 : */
6328 tgl 1575 GIC 983 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1576 :
1577 : /*
4008 rhaas 1578 ECB : * We no longer want LockErrorCleanup to do anything.
1579 : */
6328 tgl 1580 CBC 983 : lockAwaited = NULL;
8120 tgl 1581 ECB :
1582 : /*
1583 : * If we got the lock, be sure to remember it in the locallock table.
9345 bruce 1584 : */
1026 peter 1585 CBC 983 : if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
6799 tgl 1586 GIC 979 : GrantAwaitedLock();
9345 bruce 1587 ECB :
1588 : /*
8112 tgl 1589 : * We don't have to do anything else, because the awaker did all the
1590 : * necessary update of the lock table and MyProc.
1591 : */
6840 tgl 1592 GIC 983 : return MyProc->waitStatus;
1593 : }
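
/*
 * Hedged caller-side sketch (not part of proc.c): the lock manager code in
 * lock.c that calls ProcSleep is assumed to treat any return value other
 * than PROC_WAIT_STATUS_OK as a failed acquisition (in practice a deadlock)
 * and to clean up and report it.  The helper name below is hypothetical;
 * LockErrorCleanup is the routine referenced in the comments above, and
 * DeadLockReport is assumed to raise the error with the deadlock details.
 */
static void
consume_procsleep_status_sketch(ProcWaitStatus status)
{
	if (status != PROC_WAIT_STATUS_OK)
	{
		LockErrorCleanup();		/* abandon the wait and reset timeout state */
		DeadLockReport();		/* raises ERROR describing the deadlock */
	}
}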
1594 :
1595 :
9770 scrappy 1596 ECB : /*
1388 tmunro 1597 : * ProcWakeup -- wake up a process by setting its latch.
1598 : *
1599 : * Also remove the process from the wait queue and set its links invalid.
7887 tgl 1600 : *
1601 : * The appropriate lock partition lock must be held by caller.
6328 1602 : *
1603 : * XXX: presently, this code is only used for the "success" case, and only
1604 : * works correctly for that case. To clean up in the failure case, we would need
1605 : * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1606 : * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1607 : */
1608 : void
1026 peter 1609 GIC 983 : ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
9770 scrappy 1610 ECB : {
81 andres 1611 GNC 983 : if (dlist_node_is_detached(&proc->links))
81 andres 1612 UNC 0 : return;
1613 :
1026 peter 1614 GIC 983 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1615 :
1616 : /* Remove process from wait queue */
81 andres 1617 GNC 983 : dclist_delete_from_thoroughly(&proc->waitLock->waitProcs, &proc->links);
9345 bruce 1618 ECB :
1619 : /* Clean up process' state and pass it the ok/fail signal */
8112 tgl 1620 GIC 983 : proc->waitLock = NULL;
6799 1621 983 : proc->waitProcLock = NULL;
6840 1622 983 : proc->waitStatus = waitStatus;
783 fujii 1623 983 : pg_atomic_write_u64(&MyProc->waitStart, 0);
1624 :
1625 : /* And awaken it */
2987 andres 1626 983 : SetLatch(&proc->procLatch);
1627 : }
1628 :
1629 : /*
9770 scrappy 1630 ECB : * ProcLockWakeup -- routine for waking up processes when a lock is
8109 tgl 1631 : * released (or a prior waiter is aborted). Scan all waiters
1632 : * for lock, waken any that are no longer blocked.
1633 : *
1634 : * The appropriate lock partition lock must be held by caller.
1635 : */
1636 : void
7069 bruce 1637 GIC 998 : ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1638 : {
81 andres 1639 GNC 998 : dclist_head *waitQueue = &lock->waitProcs;
7069 bruce 1640 GIC 998 : LOCKMASK aheadRequests = 0;
1641 : dlist_mutable_iter miter;
8993 scrappy 1642 ECB :
81 andres 1643 GNC 998 : if (dclist_is_empty(waitQueue))
8109 tgl 1644 GIC 44 : return;
1645 :
81 andres 1646 GNC 1979 : dclist_foreach_modify(miter, waitQueue)
1647 : {
1648 1025 : PGPROC *proc = dlist_container(PGPROC, links, miter.cur);
8053 bruce 1649 CBC 1025 : LOCKMODE lockmode = proc->waitLockMode;
1650 :
8993 scrappy 1651 ECB : /*
1652 : * Waken if (a) doesn't conflict with requests of earlier waiters, and
1653 : * (b) doesn't conflict with already-held locks.
1654 : */
7570 bruce 1655 GIC 1025 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1197 peter 1656 1023 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1657 : proc->waitProcLock))
1658 : {
1659 : /* OK to waken */
6799 tgl 1660 983 : GrantLock(lock, proc->waitProcLock, lockmode);
1661 : /* removes proc from the lock's waiting process queue */
81 andres 1662 GNC 983 : ProcWakeup(proc, PROC_WAIT_STATUS_OK);
1663 : }
1664 : else
1665 : {
1666 : /*
1667 : * Lock conflicts: Don't wake, but remember requested mode for
1668 : * later checks.
1669 : */
7069 bruce 1670 GIC 42 : aheadRequests |= LOCKBIT_ON(lockmode);
1671 : }
1672 : }
1673 : }
1674 :
1675 : /*
1676 : * CheckDeadLock
1677 : *
1678 : * We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
1679 : * lock to be released by some other process. Check if there's a deadlock; if
2987 andres 1680 ECB : * not, just return. (But signal ProcSleep to log a message, if
1681 : * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1682 : * the lock's wait queue and signal an error to ProcSleep.
9770 scrappy 1683 : */
1684 : static void
7575 bruce 1685 GIC 26 : CheckDeadLock(void)
1686 : {
1687 : int i;
1688 :
1689 : /*
1690 : * Acquire exclusive lock on the entire shared lock data structures. Must
6031 bruce 1691 ECB : * grab LWLocks in partition-number order to avoid LWLock deadlock.
1692 : *
6328 tgl 1693 : * Note that the deadlock check interrupt had better not be enabled
1694 : * anywhere that this process itself holds lock partition locks, else this
1695 : * will wait forever. Also note that LWLockAcquire creates a critical
1696 : * section, so that this routine cannot be interrupted by cancel/die
1697 : * interrupts.
1698 : */
6328 tgl 1699 GIC 442 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3359 rhaas 1700 416 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1701 :
1702 : /*
1703 : * Check to see if we've been awoken by anyone in the interim.
1704 : *
5417 neilc 1705 ECB : * If we have, we can return and resume our transaction -- happy day.
5050 bruce 1706 : * Before we are awoken, the process releasing the lock grants it to us, so
1707 : * we know that we don't have to wait anymore.
1708 : *
1709 : * We check by looking to see if we've been unlinked from the wait queue.
1710 : * This is safe because we hold the lock partition lock.
1711 : */
5271 tgl 1712 GIC 26 : if (MyProc->links.prev == NULL ||
1713 26 : MyProc->links.next == NULL)
5773 tgl 1714 UIC 0 : goto check_done;
1715 :
1716 : #ifdef LOCK_DEBUG
5773 tgl 1717 ECB : if (Debug_deadlocks)
1718 : DumpAllLocks();
1719 : #endif
1720 :
1721 : /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
5773 tgl 1722 CBC 26 : deadlock_state = DeadLockCheck(MyProc);
5773 tgl 1723 ECB :
5881 bruce 1724 GIC 26 : if (deadlock_state == DS_HARD_DEADLOCK)
1725 : {
1726 : /*
1727 : * Oops. We have a deadlock.
1728 : *
5773 tgl 1729 ECB : * Get this process out of wait state. (Note: we could do this more
1730 : * efficiently by relying on lockAwaited, but use this coding to
1731 : * preserve the flexibility to kill some other transaction than the
5773 tgl 1732 EUB : * one detecting the deadlock.)
1733 : *
697 tgl 1734 ECB : * RemoveFromWaitQueue sets MyProc->waitStatus to
1735 : * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1736 : * return from the signal handler.
1737 : */
5881 bruce 1738 GIC 4 : Assert(MyProc->waitLock != NULL);
1739 4 : RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1740 :
1741 : /*
1742 : * We're done here. Transaction abort caused by the error that
1743 : * ProcSleep will raise will cause any other locks we hold to be
5773 tgl 1744 ECB : * released, thus allowing other processes to wake up; we don't need
1745 : * to do that here. NOTE: an exception is that releasing locks we
1746 : * hold doesn't consider the possibility of waiters that were blocked
1747 : * behind us on the lock we just failed to get, and might now be
1748 : * wakable because we're not in front of them anymore. However,
1749 : * RemoveFromWaitQueue took care of waking up any such processes.
5881 bruce 1750 : */
1751 : }
1752 :
6328 tgl 1753 : /*
1754 : * And release locks. We do this in reverse order for two reasons: (1)
1755 : * Anyone else who needs more than one of the locks will be trying to lock
5624 bruce 1756 : * them in increasing order; we don't want to release the other process
1757 : * until it can get all the locks it needs. (2) This avoids O(N^2)
1758 : * behavior inside LWLockRelease.
6328 tgl 1759 : */
5773 tgl 1760 CBC 22 : check_done:
6031 bruce 1761 GIC 442 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3359 rhaas 1762 416 : LWLockRelease(LockHashPartitionLockByIndex(i));
9770 scrappy 1763 26 : }
1764 :
1765 : /*
1766 : * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1767 : *
1768 : * NB: Runs inside a signal handler, be careful.
1769 : */
1770 : void
2987 andres 1771 26 : CheckDeadLockAlert(void)
1772 : {
1773 26 : int save_errno = errno;
2987 andres 1774 ECB :
2987 andres 1775 GIC 26 : got_deadlock_timeout = true;
1776 :
2987 andres 1777 ECB : /*
1778 : * Have to set the latch again, even if handle_sig_alarm already did. Back
1779 : * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1780 : * ever would be a problem, but setting a set latch again is cheap.
1781 : *
1782 : * Note that, when this function runs inside procsignal_sigusr1_handler(),
823 fujii 1783 : * the handler function sets the latch again after the latch is set here.
1784 : */
2987 andres 1785 GIC 26 : SetLatch(MyLatch);
2987 andres 1786 CBC 26 : errno = save_errno;
2987 andres 1787 GIC 26 : }
1788 :
1789 : /*
1790 : * ProcWaitForSignal - wait for a signal from another backend.
1791 : *
1792 : * As this uses the generic process latch the caller has to be robust against
1793 : * unrelated wakeups: Always check that the desired state has occurred, and
1794 : * wait again if not.
7947 tgl 1795 ECB : */
1796 : void
2378 rhaas 1797 GIC 16 : ProcWaitForSignal(uint32 wait_event_info)
1798 : {
1598 tmunro 1799 CBC 16 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1800 : wait_event_info);
2987 andres 1801 GIC 16 : ResetLatch(MyLatch);
2987 andres 1802 CBC 16 : CHECK_FOR_INTERRUPTS();
7947 tgl 1803 16 : }
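
/*
 * Hedged usage sketch (not part of proc.c): because ProcWaitForSignal can
 * return on any latch wakeup, a caller re-checks its own condition in a loop
 * and sleeps again if it has not been met.  "flag" stands for some
 * hypothetical state in shared memory that another backend sets before
 * signaling us; PG_WAIT_EXTENSION is just one plausible wait-event
 * classification.
 */
static void
wait_for_shared_flag_sketch(volatile bool *flag)
{
	while (!*flag)
		ProcWaitForSignal(PG_WAIT_EXTENSION);
}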
7947 tgl 1804 ECB :
1805 : /*
479 tmunro 1806 : * ProcSendSignal - set the latch of a backend identified by pgprocno
1807 : */
7947 tgl 1808 : void
479 tmunro 1809 GIC 3 : ProcSendSignal(int pgprocno)
1810 : {
1811 3 : if (pgprocno < 0 || pgprocno >= ProcGlobal->allProcCount)
479 tmunro 1812 UIC 0 : elog(ERROR, "pgprocno out of range");
1813 :
479 tmunro 1814 GIC 3 : SetLatch(&ProcGlobal->allProcs[pgprocno].procLatch);
7947 tgl 1815 3 : }
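
/*
 * Hedged counterpart sketch (not part of proc.c): the backend on the other
 * side publishes the shared state first and then wakes the waiter through
 * its PGPROC number.  The waiter is assumed to have advertised
 * MyProc->pgprocno somewhere the signaller can read it; SetLatch (called
 * inside ProcSendSignal) provides the memory barrier that makes the flag
 * update visible before the wakeup.
 */
static void
signal_shared_flag_sketch(volatile bool *flag, int waiter_pgprocno)
{
	*flag = true;						/* publish the condition first */
	ProcSendSignal(waiter_pgprocno);	/* then wake the waiting backend */
}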
1816 :
1817 : /*
1818 : * BecomeLockGroupLeader - designate process as lock group leader
1819 : *
1820 : * Once this function has returned, other processes can join the lock group
1821 : * by calling BecomeLockGroupMember.
1822 : */
1823 : void
2618 rhaas 1824 534 : BecomeLockGroupLeader(void)
1825 : {
1826 : LWLock *leader_lwlock;
1827 :
1828 : /* If we already did it, we don't need to do it again. */
1829 534 : if (MyProc->lockGroupLeader == MyProc)
1830 479 : return;
1831 :
1832 : /* We had better not be a follower. */
1833 55 : Assert(MyProc->lockGroupLeader == NULL);
1834 :
1835 : /* Create single-member group, containing only ourselves. */
1836 55 : leader_lwlock = LockHashPartitionLockByProc(MyProc);
1837 55 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1838 55 : MyProc->lockGroupLeader = MyProc;
1839 55 : dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
1840 55 : LWLockRelease(leader_lwlock);
1841 : }
1842 :
1843 : /*
1844 : * BecomeLockGroupMember - designate process as lock group member
1845 : *
1846 : * This is pretty straightforward except for the possibility that the leader
1847 : * whose group we're trying to join might exit before we manage to do so;
1848 : * and the PGPROC might get recycled for an unrelated process. To avoid
1849 : * that, we require the caller to pass the PID of the intended PGPROC as
1850 : * an interlock. Returns true if we successfully join the intended lock
1851 : * group, and false if not.
1852 : */
1853 : bool
1854 1298 : BecomeLockGroupMember(PGPROC *leader, int pid)
1855 : {
1856 : LWLock *leader_lwlock;
1857 1298 : bool ok = false;
1858 :
1859 : /* Group leader can't become member of group */
1860 1298 : Assert(MyProc != leader);
1861 :
1862 : /* Can't already be a member of a group */
2603 tgl 1863 1298 : Assert(MyProc->lockGroupLeader == NULL);
1864 :
1865 : /* PID must be valid. */
2618 rhaas 1866 1298 : Assert(pid != 0);
1867 :
1868 : /*
1869 : * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1870 : * accesses leader->pgprocno in a PGPROC that might be free. This is safe
1871 : * because all PGPROCs' pgprocno fields are set during shared memory
1872 : * initialization and never change thereafter; so we will acquire the
1873 : * correct lock even if the leader PGPROC is in process of being recycled.
1874 : */
2604 1875 1298 : leader_lwlock = LockHashPartitionLockByProc(leader);
2618 1876 1298 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1877 :
1878 : /* Is this the leader we're looking for? */
2603 tgl 1879 1298 : if (leader->pid == pid && leader->lockGroupLeader == leader)
1880 : {
1881 : /* OK, join the group */
2618 rhaas 1882 1298 : ok = true;
1883 1298 : MyProc->lockGroupLeader = leader;
1884 1298 : dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1885 : }
1886 1298 : LWLockRelease(leader_lwlock);
1887 :
1888 1298 : return ok;
1889 : }
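
/*
 * Hedged usage sketch (not part of proc.c): the calling pattern implied by
 * the two functions above.  The leader forms the group before launching its
 * helper processes and publishes its PGPROC pointer and PID somewhere they
 * can read (that publishing step is hypothetical here); each helper then
 * joins and must cope with failure, which means the leader already exited or
 * its PGPROC was recycled.  The function names below are hypothetical.
 */
static void
lock_group_leader_side_sketch(void)
{
	BecomeLockGroupLeader();
	/* ... publish MyProc and MyProcPid for the helpers, then launch them ... */
}

static void
lock_group_helper_side_sketch(PGPROC *leader, int leader_pid)
{
	if (!BecomeLockGroupMember(leader, leader_pid))
		ereport(ERROR,
				(errmsg("lock group leader exited before this process could join")));
}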
|