Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * lock.c
4 : : * POSTGRES primary lock mechanism
5 : : *
6 : : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/storage/lmgr/lock.c
12 : : *
13 : : * NOTES
14 : : * A lock table is a shared memory hash table. When
15 : : * a process tries to acquire a lock of a type that conflicts
16 : : * with existing locks, it is put to sleep using the routines
17 : : * in storage/lmgr/proc.c.
18 : : *
19 : : * For the most part, this code should be invoked via lmgr.c
20 : : * or another lock-management module, not directly.
21 : : *
22 : : * Interface:
23 : : *
24 : : * InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 : : * LockAcquire(), LockRelease(), LockReleaseAll(),
26 : : * LockCheckConflicts(), GrantLock()
27 : : *
28 : : *-------------------------------------------------------------------------
29 : : */
30 : : #include "postgres.h"
31 : :
32 : : #include <signal.h>
33 : : #include <unistd.h>
34 : :
35 : : #include "access/transam.h"
36 : : #include "access/twophase.h"
37 : : #include "access/twophase_rmgr.h"
38 : : #include "access/xlog.h"
39 : : #include "access/xlogutils.h"
40 : : #include "miscadmin.h"
41 : : #include "pg_trace.h"
42 : : #include "storage/proc.h"
43 : : #include "storage/procarray.h"
44 : : #include "storage/sinvaladt.h"
45 : : #include "storage/spin.h"
46 : : #include "storage/standby.h"
47 : : #include "utils/memutils.h"
48 : : #include "utils/ps_status.h"
49 : : #include "utils/resowner.h"
50 : :
51 : :
52 : : /* This configuration variable is used to set the lock table size */
53 : : int max_locks_per_xact; /* set by guc.c */
54 : :
55 : : #define NLOCKENTS() \
56 : : mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
57 : :
58 : :
59 : : /*
60 : : * Data structures defining the semantics of the standard lock methods.
61 : : *
62 : : * The conflict table defines the semantics of the various lock modes.
63 : : */
/*
 * Conflict table, indexed by requested lock mode: each entry is the bitmask
 * of granted/requested modes that conflict with that mode.  Slot 0 is the
 * "no lock" placeholder and conflicts with nothing.
 */
static const LOCKMASK LockConflicts[] = {
	0,

	/* AccessShareLock */
	LOCKBIT_ON(AccessExclusiveLock),

	/* RowShareLock */
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* RowExclusiveLock */
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ShareUpdateExclusiveLock */
	LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ShareLock */
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ShareRowExclusiveLock */
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ExclusiveLock */
	LOCKBIT_ON(RowShareLock) |
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* AccessExclusiveLock conflicts with every mode, including itself */
	LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)

};
105 : :
106 : : /* Names of lock modes, for debug printouts */
107 : : static const char *const lock_mode_names[] =
108 : : {
109 : : "INVALID",
110 : : "AccessShareLock",
111 : : "RowShareLock",
112 : : "RowExclusiveLock",
113 : : "ShareUpdateExclusiveLock",
114 : : "ShareLock",
115 : : "ShareRowExclusiveLock",
116 : : "ExclusiveLock",
117 : : "AccessExclusiveLock"
118 : : };
119 : :
120 : : #ifndef LOCK_DEBUG
121 : : static bool Dummy_trace = false;
122 : : #endif
123 : :
124 : : static const LockMethodData default_lockmethod = {
125 : : MaxLockMode,
126 : : LockConflicts,
127 : : lock_mode_names,
128 : : #ifdef LOCK_DEBUG
129 : : &Trace_locks
130 : : #else
131 : : &Dummy_trace
132 : : #endif
133 : : };
134 : :
135 : : static const LockMethodData user_lockmethod = {
136 : : MaxLockMode,
137 : : LockConflicts,
138 : : lock_mode_names,
139 : : #ifdef LOCK_DEBUG
140 : : &Trace_userlocks
141 : : #else
142 : : &Dummy_trace
143 : : #endif
144 : : };
145 : :
146 : : /*
147 : : * map from lock method id to the lock table data structures
148 : : */
149 : : static const LockMethod LockMethods[] = {
150 : : NULL,
151 : : &default_lockmethod,
152 : : &user_lockmethod
153 : : };
154 : :
155 : :
156 : : /* Record that's written to 2PC state file when a lock is persisted */
157 : : typedef struct TwoPhaseLockRecord
158 : : {
159 : : LOCKTAG locktag;
160 : : LOCKMODE lockmode;
161 : : } TwoPhaseLockRecord;
162 : :
163 : :
164 : : /*
165 : : * Count of the number of fast path lock slots we believe to be used. This
166 : : * might be higher than the real number if another backend has transferred
167 : : * our locks to the primary lock table, but it can never be lower than the
168 : : * real value, since only we can acquire locks on our own behalf.
169 : : */
170 : : static int FastPathLocalUseCount = 0;
171 : :
172 : : /*
173 : : * Flag to indicate if the relation extension lock is held by this backend.
174 : : * This flag is used to ensure that while holding the relation extension lock
175 : : * we don't try to acquire a heavyweight lock on any other object. This
176 : : * restriction implies that the relation extension lock won't ever participate
177 : : * in the deadlock cycle because we can never wait for any other heavyweight
178 : : * lock after acquiring this lock.
179 : : *
180 : : * Such a restriction is okay for relation extension locks as unlike other
181 : : * heavyweight locks these are not held till the transaction end. These are
182 : : * taken for a short duration to extend a particular relation and then
183 : : * released.
184 : : */
185 : : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
186 : :
187 : : /* Macros for manipulating proc->fpLockBits */
188 : : #define FAST_PATH_BITS_PER_SLOT 3
189 : : #define FAST_PATH_LOCKNUMBER_OFFSET 1
190 : : #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
191 : : #define FAST_PATH_GET_BITS(proc, n) \
192 : : (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
193 : : #define FAST_PATH_BIT_POSITION(n, l) \
194 : : (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
195 : : AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
196 : : AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
197 : : ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
198 : : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
199 : : (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
200 : : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
201 : : (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
202 : : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
203 : : ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
204 : :
205 : : /*
206 : : * The fast-path lock mechanism is concerned only with relation locks on
207 : : * unshared relations by backends bound to a database. The fast-path
208 : : * mechanism exists mostly to accelerate acquisition and release of locks
209 : : * that rarely conflict. Because ShareUpdateExclusiveLock is
210 : : * self-conflicting, it can't use the fast-path mechanism; but it also does
211 : : * not conflict with any of the locks that do, so we can ignore it completely.
212 : : */
213 : : #define EligibleForRelationFastPath(locktag, mode) \
214 : : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
215 : : (locktag)->locktag_type == LOCKTAG_RELATION && \
216 : : (locktag)->locktag_field1 == MyDatabaseId && \
217 : : MyDatabaseId != InvalidOid && \
218 : : (mode) < ShareUpdateExclusiveLock)
219 : : #define ConflictsWithRelationFastPath(locktag, mode) \
220 : : ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
221 : : (locktag)->locktag_type == LOCKTAG_RELATION && \
222 : : (locktag)->locktag_field1 != InvalidOid && \
223 : : (mode) > ShareUpdateExclusiveLock)
224 : :
225 : : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
226 : : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
227 : : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
228 : : const LOCKTAG *locktag, uint32 hashcode);
229 : : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
230 : :
231 : : /*
232 : : * To make the fast-path lock mechanism work, we must have some way of
233 : : * preventing the use of the fast-path when a conflicting lock might be present.
234 : : * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
235 : : * and maintain an integer count of the number of "strong" lockers
236 : : * in each partition. When any "strong" lockers are present (which is
237 : : * hopefully not very often), the fast-path mechanism can't be used, and we
238 : : * must fall back to the slower method of pushing matching locks directly
239 : : * into the main lock tables.
240 : : *
241 : : * The deadlock detector does not know anything about the fast path mechanism,
242 : : * so any locks that might be involved in a deadlock must be transferred from
243 : : * the fast-path queues to the main lock table.
244 : : */
245 : :
246 : : #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
247 : : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
248 : : (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
249 : : #define FastPathStrongLockHashPartition(hashcode) \
250 : : ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
251 : :
252 : : typedef struct
253 : : {
254 : : slock_t mutex;
255 : : uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
256 : : } FastPathStrongRelationLockData;
257 : :
258 : : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
259 : :
260 : :
261 : : /*
262 : : * Pointers to hash tables containing lock state
263 : : *
264 : : * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
265 : : * shared memory; LockMethodLocalHash is local to each backend.
266 : : */
267 : : static HTAB *LockMethodLockHash;
268 : : static HTAB *LockMethodProcLockHash;
269 : : static HTAB *LockMethodLocalHash;
270 : :
271 : :
272 : : /* private state for error cleanup */
273 : : static LOCALLOCK *StrongLockInProgress;
274 : : static LOCALLOCK *awaitedLock;
275 : : static ResourceOwner awaitedOwner;
276 : :
277 : :
278 : : #ifdef LOCK_DEBUG
279 : :
280 : : /*------
281 : : * The following configuration options are available for lock debugging:
282 : : *
283 : : * TRACE_LOCKS -- give a bunch of output what's going on in this file
284 : : * TRACE_USERLOCKS -- same but for user locks
285 : : * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
286 : : * (use to avoid output on system tables)
287 : : * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
288 : : * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
289 : : *
290 : : * Furthermore, but in storage/lmgr/lwlock.c:
291 : : * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
292 : : *
293 : : * Define LOCK_DEBUG at compile time to get all these enabled.
294 : : * --------
295 : : */
296 : :
297 : : int Trace_lock_oidmin = FirstNormalObjectId;
298 : : bool Trace_locks = false;
299 : : bool Trace_userlocks = false;
300 : : int Trace_lock_table = 0;
301 : : bool Debug_deadlocks = false;
302 : :
303 : :
304 : : inline static bool
305 : : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
306 : : {
307 : : return
308 : : (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
309 : : ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
310 : : || (Trace_lock_table &&
311 : : (tag->locktag_field2 == Trace_lock_table));
312 : : }
313 : :
314 : :
/*
 * Emit a LOG line describing the state of 'lock', if tracing is enabled
 * for its tag.  Slots 1..7 of requested[]/granted[] are the per-mode
 * request and grant counts.
 */
inline static void
LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
{
	if (LOCK_DEBUG_ENABLED(&lock->tag))
		elog(LOG,
			 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
			 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
			 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
			 where, lock,
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3, lock->tag.locktag_field4,
			 lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
			 lock->grantMask,
			 lock->requested[1], lock->requested[2], lock->requested[3],
			 lock->requested[4], lock->requested[5], lock->requested[6],
			 lock->requested[7], lock->nRequested,
			 lock->granted[1], lock->granted[2], lock->granted[3],
			 lock->granted[4], lock->granted[5], lock->granted[6],
			 lock->granted[7], lock->nGranted,
			 dclist_count(&lock->waitProcs),
			 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
}
337 : :
338 : :
/*
 * Emit a LOG line describing 'proclockP', if tracing is enabled for the
 * tag of its associated LOCK.
 */
inline static void
PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
{
	if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
		elog(LOG,
			 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
			 where, proclockP, proclockP->tag.myLock,
			 PROCLOCK_LOCKMETHOD(*(proclockP)),
			 proclockP->tag.myProc, (int) proclockP->holdMask);
}
349 : : #else /* not LOCK_DEBUG */
350 : :
351 : : #define LOCK_PRINT(where, lock, type) ((void) 0)
352 : : #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
353 : : #endif /* not LOCK_DEBUG */
354 : :
355 : :
356 : : static uint32 proclock_hash(const void *key, Size keysize);
357 : : static void RemoveLocalLock(LOCALLOCK *locallock);
358 : : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
359 : : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
360 : : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
361 : : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
362 : : static void FinishStrongLockAcquire(void);
363 : : static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner,
364 : : bool dontWait);
365 : : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
366 : : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
367 : : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
368 : : PROCLOCK *proclock, LockMethod lockMethodTable);
369 : : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
370 : : LockMethod lockMethodTable, uint32 hashcode,
371 : : bool wakeupNeeded);
372 : : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
373 : : LOCKTAG *locktag, LOCKMODE lockmode,
374 : : bool decrement_strong_lock_count);
375 : : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
376 : : BlockedProcsData *data);
377 : :
378 : :
379 : : /*
380 : : * InitLocks -- Initialize the lock manager's data structures.
381 : : *
382 : : * This is called from CreateSharedMemoryAndSemaphores(), which see for
383 : : * more comments. In the normal postmaster case, the shared hash tables
384 : : * are created here, as well as a locallock hash table that will remain
385 : : * unused and empty in the postmaster itself. Backends inherit the pointers
386 : : * to the shared tables via fork(), and also inherit an image of the locallock
387 : : * hash table, which they proceed to use. In the EXEC_BACKEND case, each
388 : : * backend re-executes this code to obtain pointers to the already existing
389 : : * shared hash tables and to create its locallock hash table.
390 : : */
391 : : void
8514 tgl@sss.pgh.pa.us 392 :CBC 898 : InitLocks(void)
393 : : {
394 : : HASHCTL info;
395 : : long init_table_size,
396 : : max_table_size;
397 : : bool found;
398 : :
399 : : /*
400 : : * Compute init/max size to request for lock hashtables. Note these
401 : : * calculations must agree with LockShmemSize!
402 : : */
6876 403 : 898 : max_table_size = NLOCKENTS();
7138 404 : 898 : init_table_size = max_table_size / 2;
405 : :
406 : : /*
407 : : * Allocate hash table for LOCK structs. This stores per-locked-object
408 : : * information.
409 : : */
8231 410 : 898 : info.keysize = sizeof(LOCKTAG);
411 : 898 : info.entrysize = sizeof(LOCK);
6475 412 : 898 : info.num_partitions = NUM_LOCK_PARTITIONS;
413 : :
414 : 898 : LockMethodLockHash = ShmemInitHash("LOCK hash",
415 : : init_table_size,
416 : : max_table_size,
417 : : &info,
418 : : HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
419 : :
420 : : /* Assume an average of 2 holders per lock */
6699 421 : 898 : max_table_size *= 2;
422 : 898 : init_table_size *= 2;
423 : :
424 : : /*
425 : : * Allocate hash table for PROCLOCK structs. This stores
426 : : * per-lock-per-holder information.
427 : : */
7940 bruce@momjian.us 428 : 898 : info.keysize = sizeof(PROCLOCKTAG);
429 : 898 : info.entrysize = sizeof(PROCLOCK);
6475 tgl@sss.pgh.pa.us 430 : 898 : info.hash = proclock_hash;
431 : 898 : info.num_partitions = NUM_LOCK_PARTITIONS;
432 : :
433 : 898 : LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
434 : : init_table_size,
435 : : max_table_size,
436 : : &info,
437 : : HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
438 : :
439 : : /*
440 : : * Allocate fast-path structures.
441 : : */
4653 rhaas@postgresql.org 442 : 898 : FastPathStrongRelationLocks =
443 : 898 : ShmemInitStruct("Fast Path Strong Relation Lock Data",
444 : : sizeof(FastPathStrongRelationLockData), &found);
4705 445 [ + - ]: 898 : if (!found)
4653 446 : 898 : SpinLockInit(&FastPathStrongRelationLocks->mutex);
447 : :
448 : : /*
449 : : * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
450 : : * counts and resource owner information.
451 : : *
452 : : * The non-shared table could already exist in this process (this occurs
453 : : * when the postmaster is recreating shared memory after a backend crash).
454 : : * If so, delete and recreate it. (We could simply leave it, since it
455 : : * ought to be empty in the postmaster, but for safety let's zap it.)
456 : : */
6701 tgl@sss.pgh.pa.us 457 [ + + ]: 898 : if (LockMethodLocalHash)
458 : 5 : hash_destroy(LockMethodLocalHash);
459 : :
7170 460 : 898 : info.keysize = sizeof(LOCALLOCKTAG);
461 : 898 : info.entrysize = sizeof(LOCALLOCK);
462 : :
6701 463 : 898 : LockMethodLocalHash = hash_create("LOCALLOCK hash",
464 : : 16,
465 : : &info,
466 : : HASH_ELEM | HASH_BLOBS);
10141 scrappy@hub.org 467 : 898 : }
468 : :
469 : :
470 : : /*
471 : : * Fetch the lock method table associated with a given lock
472 : : */
473 : : LockMethod
6701 tgl@sss.pgh.pa.us 474 : 102 : GetLocksMethodTable(const LOCK *lock)
475 : : {
476 : 102 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
477 : :
478 [ + - - + ]: 102 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
479 : 102 : return LockMethods[lockmethodid];
480 : : }
481 : :
482 : : /*
483 : : * Fetch the lock method table associated with a given locktag
484 : : */
485 : : LockMethod
2974 486 : 1119 : GetLockTagsMethodTable(const LOCKTAG *locktag)
487 : : {
488 : 1119 : LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
489 : :
490 [ + - - + ]: 1119 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
491 : 1119 : return LockMethods[lockmethodid];
492 : : }
493 : :
494 : :
495 : : /*
496 : : * Compute the hash code associated with a LOCKTAG.
497 : : *
498 : : * To avoid unnecessary recomputations of the hash code, we try to do this
499 : : * just once per function, and then pass it around as needed. Aside from
500 : : * passing the hashcode to hash_search_with_hash_value(), we can extract
501 : : * the lock partition number from the hashcode.
502 : : */
uint32
LockTagHashCode(const LOCKTAG *locktag)
{
	/* Delegate to the shared LOCK table's dynahash hash function. */
	return get_hash_value(LockMethodLockHash, (const void *) locktag);
}
508 : :
509 : : /*
510 : : * Compute the hash code associated with a PROCLOCKTAG.
511 : : *
512 : : * Because we want to use just one set of partition locks for both the
513 : : * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
514 : : * fall into the same partition number as their associated LOCKs.
515 : : * dynahash.c expects the partition number to be the low-order bits of
516 : : * the hash code, and therefore a PROCLOCKTAG's hash code must have the
517 : : * same low-order bits as the associated LOCKTAG's hash code. We achieve
518 : : * this with this specialized hash function.
519 : : */
520 : : static uint32
521 : 860 : proclock_hash(const void *key, Size keysize)
522 : : {
523 : 860 : const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
524 : : uint32 lockhash;
525 : : Datum procptr;
526 : :
527 [ - + ]: 860 : Assert(keysize == sizeof(PROCLOCKTAG));
528 : :
529 : : /* Look into the associated LOCK object, and compute its hash code */
530 : 860 : lockhash = LockTagHashCode(&proclocktag->myLock->tag);
531 : :
532 : : /*
533 : : * To make the hash code also depend on the PGPROC, we xor the proc
534 : : * struct's address into the hash code, left-shifted so that the
535 : : * partition-number bits don't change. Since this is only a hash, we
536 : : * don't care if we lose high-order bits of the address; use an
537 : : * intermediate variable to suppress cast-pointer-to-int warnings.
538 : : */
539 : 860 : procptr = PointerGetDatum(proclocktag->myProc);
540 : 860 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
541 : :
542 : 860 : return lockhash;
543 : : }
544 : :
545 : : /*
546 : : * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
547 : : * for its underlying LOCK.
548 : : *
549 : : * We use this just to avoid redundant calls of LockTagHashCode().
550 : : */
551 : : static inline uint32
552 : 3814215 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
553 : : {
6402 bruce@momjian.us 554 : 3814215 : uint32 lockhash = hashcode;
555 : : Datum procptr;
556 : :
557 : : /*
558 : : * This must match proclock_hash()!
559 : : */
6475 tgl@sss.pgh.pa.us 560 : 3814215 : procptr = PointerGetDatum(proclocktag->myProc);
561 : 3814215 : lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
562 : :
563 : 3814215 : return lockhash;
564 : : }
565 : :
566 : : /*
567 : : * Given two lock modes, return whether they would conflict.
568 : : */
569 : : bool
4099 alvherre@alvh.no-ip. 570 : 129760 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
571 : : {
572 : 129760 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
573 : :
574 [ + + ]: 129760 : if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
575 : 129664 : return true;
576 : :
577 : 96 : return false;
578 : : }
579 : :
580 : : /*
581 : : * LockHeldByMe -- test whether lock 'locktag' is held with mode 'lockmode'
582 : : * by the current transaction
583 : : */
584 : : bool
2022 tgl@sss.pgh.pa.us 585 : 3867040 : LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
586 : : {
587 : : LOCALLOCKTAG localtag;
588 : : LOCALLOCK *locallock;
589 : :
590 : : /*
591 : : * See if there is a LOCALLOCK entry for this lock and lockmode
592 : : */
593 [ + - - + : 3867040 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
- - - - -
- ]
594 : 3867040 : localtag.lock = *locktag;
595 : 3867040 : localtag.mode = lockmode;
596 : :
597 : 3867040 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
598 : : &localtag,
599 : : HASH_FIND, NULL);
600 : :
601 [ + + + - ]: 3867040 : return (locallock && locallock->nLocks > 0);
602 : : }
603 : :
604 : : #ifdef USE_ASSERT_CHECKING
605 : : /*
606 : : * GetLockMethodLocalHash -- return the hash of local locks, for modules that
607 : : * evaluate assertions based on all locks held.
608 : : */
HTAB *
GetLockMethodLocalHash(void)
{
	/* Assert-only introspection hook (compiled under USE_ASSERT_CHECKING). */
	return LockMethodLocalHash;
}
614 : : #endif
615 : :
616 : : /*
617 : : * LockHasWaiters -- look up 'locktag' and check if releasing this
618 : : * lock would wake up other processes waiting for it.
619 : : */
bool
LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
{
	/* NOTE(review): sessionLock is not referenced in this body; it appears
	 * to exist for signature parity with LockRelease -- confirm. */
	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
	LockMethod	lockMethodTable;
	LOCALLOCKTAG localtag;
	LOCALLOCK  *locallock;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	LWLock	   *partitionLock;
	bool		hasWaiters = false;

	/* Validate the lock method id and mode before touching any table. */
	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];
	if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
		elog(ERROR, "unrecognized lock mode: %d", lockmode);

#ifdef LOCK_DEBUG
	if (LOCK_DEBUG_ENABLED(locktag))
		elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
			 locktag->locktag_field1, locktag->locktag_field2,
			 lockMethodTable->lockModeNames[lockmode]);
#endif

	/*
	 * Find the LOCALLOCK entry for this lock and lockmode
	 */
	MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
	localtag.lock = *locktag;
	localtag.mode = lockmode;

	locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
										  &localtag,
										  HASH_FIND, NULL);

	/*
	 * let the caller print its own error message, too. Do not ereport(ERROR).
	 */
	if (!locallock || locallock->nLocks <= 0)
	{
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		return false;
	}

	/*
	 * Check the shared lock table.  LW_SHARED suffices since we only read
	 * the lock state here.
	 */
	partitionLock = LockHashPartitionLock(locallock->hashcode);

	LWLockAcquire(partitionLock, LW_SHARED);

	/*
	 * We don't need to re-find the lock or proclock, since we kept their
	 * addresses in the locallock table, and they couldn't have been removed
	 * while we were holding a lock on them.
	 */
	lock = locallock->lock;
	LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
	proclock = locallock->proclock;
	PROCLOCK_PRINT("LockHasWaiters: found", proclock);

	/*
	 * Double-check that we are actually holding a lock of the type we want to
	 * release.  If not, clean up the stale LOCALLOCK entry and warn.
	 */
	if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
	{
		PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
		/* Release the partition lock before WARNING / local cleanup. */
		LWLockRelease(partitionLock);
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		RemoveLocalLock(locallock);
		return false;
	}

	/*
	 * Do the checking: anyone waiting in a mode that conflicts with ours
	 * would be woken by our release.
	 */
	if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
		hasWaiters = true;

	LWLockRelease(partitionLock);

	return hasWaiters;
}
707 : :
708 : : /*
709 : : * LockAcquire -- Check for lock conflicts, sleep if conflict found,
710 : : * set lock if/when no conflicts.
711 : : *
712 : : * Inputs:
713 : : * locktag: unique identifier for the lockable object
714 : : * lockmode: lock mode to acquire
715 : : * sessionLock: if true, acquire lock for session not current transaction
716 : : * dontWait: if true, don't wait to acquire lock
717 : : *
718 : : * Returns one of:
719 : : * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
720 : : * LOCKACQUIRE_OK lock successfully acquired
721 : : * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
722 : : * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
723 : : *
724 : : * In the normal case where dontWait=false and the caller doesn't need to
725 : : * distinguish a freshly acquired lock from one already taken earlier in
726 : : * this same transaction, there is no need to examine the return value.
727 : : *
728 : : * Side Effects: The lock is acquired and recorded in lock tables.
729 : : *
730 : : * NOTE: if we wait for the lock, there is no way to abort the wait
731 : : * short of aborting the transaction.
732 : : */
LockAcquireResult
LockAcquire(const LOCKTAG *locktag,
			LOCKMODE lockmode,
			bool sessionLock,
			bool dontWait)
{
	/*
	 * Plain variant of LockAcquireExtended: report a full lock table as
	 * ERROR (reportMemoryError = true) and don't return the LOCALLOCK
	 * (locallockp = NULL).
	 */
	return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
							   true, NULL);
}
742 : :
743 : : /*
744 : : * LockAcquireExtended - allows us to specify additional options
745 : : *
746 : : * reportMemoryError specifies whether a lock request that fills the lock
747 : : * table should generate an ERROR or not. Passing "false" allows the caller
748 : : * to attempt to recover from lock-table-full situations, perhaps by forcibly
749 : : * canceling other lock holders and then retrying. Note, however, that the
750 : : * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
751 : : * in combination with dontWait = true, as the cause of failure couldn't be
752 : : * distinguished.
753 : : *
754 : : * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
755 : : * table entry if a lock is successfully acquired, or NULL if not.
756 : : */
 757 : : LockAcquireResult
5230 simon@2ndQuadrant.co 758 : 17132179 : LockAcquireExtended(const LOCKTAG *locktag,
 759 : : LOCKMODE lockmode,
 760 : : bool sessionLock,
 761 : : bool dontWait,
 762 : : bool reportMemoryError,
 763 : : LOCALLOCK **locallockp)
 764 : : {
6701 tgl@sss.pgh.pa.us 765 : 17132179 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
 766 : : LockMethod lockMethodTable;
 767 : : LOCALLOCKTAG localtag;
 768 : : LOCALLOCK *locallock;
 769 : : LOCK *lock;
 770 : : PROCLOCK *proclock;
 771 : : bool found;
 772 : : ResourceOwner owner;
 773 : : uint32 hashcode;
 774 : : LWLock *partitionLock;
 775 : : bool found_conflict;
4885 simon@2ndQuadrant.co 776 : 17132179 : bool log_lock = false;
 777 : :
6701 tgl@sss.pgh.pa.us 778 [ + - - + ]: 17132179 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
6701 tgl@sss.pgh.pa.us 779 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
6701 tgl@sss.pgh.pa.us 780 :CBC 17132179 : lockMethodTable = LockMethods[lockmethodid];
 781 [ + - - + ]: 17132179 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
6701 tgl@sss.pgh.pa.us 782 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
 783 : :
5230 simon@2ndQuadrant.co 784 [ + + + + ]:CBC 17132179 : if (RecoveryInProgress() && !InRecovery &&
 785 [ + + ]: 219079 : (locktag->locktag_type == LOCKTAG_OBJECT ||
5161 bruce@momjian.us 786 [ + - - + ]: 219079 : locktag->locktag_type == LOCKTAG_RELATION) &&
 787 : : lockmode > RowExclusiveLock)
5230 simon@2ndQuadrant.co 788 [ # # ]:UBC 0 : ereport(ERROR,
 789 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
 790 : : errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
 791 : : lockMethodTable->lockModeNames[lockmode]),
 792 : : errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
 793 : :
 794 : : #ifdef LOCK_DEBUG
 795 : : if (LOCK_DEBUG_ENABLED(locktag))
 796 : : elog(LOG, "LockAcquire: lock [%u,%u] %s",
 797 : : locktag->locktag_field1, locktag->locktag_field2,
 798 : : lockMethodTable->lockModeNames[lockmode]);
 799 : : #endif
 800 : :
 801 : : /* Identify owner for lock */
4363 tgl@sss.pgh.pa.us 802 [ + + ]:CBC 17132179 : if (sessionLock)
7170 803 : 107583 : owner = NULL;
 804 : : else
4363 805 : 17024596 : owner = CurrentResourceOwner;
 806 : :
 807 : : /*
 808 : : * Find or create a LOCALLOCK entry for this lock and lockmode
 809 : : */
2489 810 [ + - - + : 17132179 : MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
 - - - - -
 - ]
7170 811 : 17132179 : localtag.lock = *locktag;
 812 : 17132179 : localtag.mode = lockmode;
 813 : :
6701 814 : 17132179 : locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
 815 : : &localtag,
 816 : : HASH_ENTER, &found);
 817 : :
 818 : : /*
 819 : : * if it's a new locallock object, initialize it
 820 : : */
7170 821 [ + + ]: 17132179 : if (!found)
 822 : : {
 823 : 15450902 : locallock->lock = NULL;
 824 : 15450902 : locallock->proclock = NULL;
6475 825 : 15450902 : locallock->hashcode = LockTagHashCode(&(localtag.lock));
7170 826 : 15450902 : locallock->nLocks = 0;
2046 827 : 15450902 : locallock->holdsStrongLockCount = false;
 828 : 15450902 : locallock->lockCleared = false;
7170 829 : 15450902 : locallock->numLockOwners = 0;
 830 : 15450902 : locallock->maxLockOwners = 8;
3129 831 : 15450902 : locallock->lockOwners = NULL; /* in case next line fails */
7170 832 : 15450902 : locallock->lockOwners = (LOCALLOCKOWNER *)
 833 : 15450902 : MemoryContextAlloc(TopMemoryContext,
2489 834 : 15450902 : locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
 835 : : }
 836 : : else
 837 : : {
 838 : : /* Make sure there will be room to remember the lock */
7170 839 [ + + ]: 1681277 : if (locallock->numLockOwners >= locallock->maxLockOwners)
 840 : : {
7168 bruce@momjian.us 841 : 19 : int newsize = locallock->maxLockOwners * 2;
 842 : :
7170 tgl@sss.pgh.pa.us 843 : 19 : locallock->lockOwners = (LOCALLOCKOWNER *)
 844 : 19 : repalloc(locallock->lockOwners,
 845 : : newsize * sizeof(LOCALLOCKOWNER));
 846 : 19 : locallock->maxLockOwners = newsize;
 847 : : }
 848 : : }
4705 rhaas@postgresql.org 849 : 17132179 : hashcode = locallock->hashcode;
 850 : :
2046 tgl@sss.pgh.pa.us 851 [ + + ]: 17132179 : if (locallockp)
 852 : 16584951 : *locallockp = locallock;
 853 : :
 854 : : /*
 855 : : * If we already hold the lock, we can just increase the count locally.
 856 : : *
 857 : : * If lockCleared is already set, caller need not worry about absorbing
 858 : : * sinval messages related to the lock's object.
 859 : : */
7170 860 [ + + ]: 17132179 : if (locallock->nLocks > 0)
 861 : : {
 862 : 1681277 : GrantLockLocal(locallock, owner);
2046 863 [ + + ]: 1681277 : if (locallock->lockCleared)
 864 : 1614361 : return LOCKACQUIRE_ALREADY_CLEAR;
 865 : : else
 866 : 66916 : return LOCKACQUIRE_ALREADY_HELD;
 867 : : }
 868 : :
 869 : : /*
 870 : : * We don't acquire any other heavyweight lock while holding the relation
 871 : : * extension lock. We do allow to acquire the same relation extension
 872 : : * lock more than once but that case won't reach here.
 873 : : */
1488 akapila@postgresql.o 874 [ - + ]: 15450902 : Assert(!IsRelationExtensionLockHeld);
 875 : :
 876 : : /*
 877 : : * Prepare to emit a WAL record if acquisition of this lock needs to be
 878 : : * replayed in a standby server.
 879 : : *
 880 : : * Here we prepare to log; after lock is acquired we'll issue log record.
 881 : : * This arrangement simplifies error recovery in case the preparation step
 882 : : * fails.
 883 : : *
 884 : : * Only AccessExclusiveLocks can conflict with lock types that read-only
 885 : : * transactions can acquire in a standby server. Make sure this definition
 886 : : * matches the one in GetRunningTransactionLocks().
 887 : : */
4885 simon@2ndQuadrant.co 888 [ + + ]: 15450902 : if (lockmode >= AccessExclusiveLock &&
 889 [ + + ]: 194101 : locktag->locktag_type == LOCKTAG_RELATION &&
 890 [ + + ]: 129814 : !RecoveryInProgress() &&
 891 [ + + ]: 108291 : XLogStandbyInfoActive())
 892 : : {
 893 : 81398 : LogAccessExclusiveLockPrepare();
 894 : 81398 : log_lock = true;
 895 : : }
 896 : :
 897 : : /*
 898 : : * Attempt to take lock via fast path, if eligible. But if we remember
 899 : : * having filled up the fast path array, we don't attempt to make any
 900 : : * further use of it until we release some locks. It's possible that some
 901 : : * other backend has transferred some of those locks to the shared hash
 902 : : * table, leaving space free, but it's not worth acquiring the LWLock just
 903 : : * to check. It's also possible that we're acquiring a second or third
 904 : : * lock type on a relation we have already locked using the fast-path, but
 905 : : * for now we don't worry about that case either.
 906 : : */
3791 tgl@sss.pgh.pa.us 907 [ + + + + : 15450902 : if (EligibleForRelationFastPath(locktag, lockmode) &&
 + + + + +
 + ]
 908 [ + + ]: 13999676 : FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
 909 : : {
4326 bruce@momjian.us 910 : 13690170 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
 911 : : bool acquired;
 912 : :
 913 : : /*
 914 : : * LWLockAcquire acts as a memory sequencing point, so it's safe to
 915 : : * assume that any strong locker whose increment to
 916 : : * FastPathStrongRelationLocks->counts becomes visible after we test
 917 : : * it has yet to begin to transfer fast-path locks.
 918 : : */
1430 tgl@sss.pgh.pa.us 919 : 13690170 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4337 rhaas@postgresql.org 920 [ + + ]: 13690170 : if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
 921 : 244624 : acquired = false;
 922 : : else
 923 : 13445546 : acquired = FastPathGrantRelationLock(locktag->locktag_field2,
 924 : : lockmode);
1430 tgl@sss.pgh.pa.us 925 : 13690170 : LWLockRelease(&MyProc->fpInfoLock);
4337 rhaas@postgresql.org 926 [ + + ]: 13690170 : if (acquired)
 927 : : {
 928 : : /*
 929 : : * The locallock might contain stale pointers to some old shared
 930 : : * objects; we MUST reset these to null before considering the
 931 : : * lock to be acquired via fast-path.
 932 : : */
3791 tgl@sss.pgh.pa.us 933 : 13445546 : locallock->lock = NULL;
 934 : 13445546 : locallock->proclock = NULL;
4337 rhaas@postgresql.org 935 : 13445546 : GrantLockLocal(locallock, owner);
 936 : 13445546 : return LOCKACQUIRE_OK;
 937 : : }
 938 : : }
 939 : :
 940 : : /*
 941 : : * If this lock could potentially have been taken via the fast-path by
 942 : : * some other backend, we must (temporarily) disable further use of the
 943 : : * fast-path for this lock tag, and migrate any locks already taken via
 944 : : * this method to the main lock table.
 945 : : */
 946 [ + + + + : 2005356 : if (ConflictsWithRelationFastPath(locktag, lockmode))
 + + + + ]
 947 : : {
4326 bruce@momjian.us 948 : 156365 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
 949 : :
4337 rhaas@postgresql.org 950 : 156365 : BeginStrongLockAcquire(locallock, fasthashcode);
 951 [ - + ]: 156365 : if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
 952 : : hashcode))
 953 : : {
4337 rhaas@postgresql.org 954 :UBC 0 : AbortStrongLockAcquire();
2046 tgl@sss.pgh.pa.us 955 [ # # ]: 0 : if (locallock->nLocks == 0)
 956 : 0 : RemoveLocalLock(locallock);
 957 [ # # ]: 0 : if (locallockp)
 958 : 0 : *locallockp = NULL;
4337 rhaas@postgresql.org 959 [ # # ]: 0 : if (reportMemoryError)
 960 [ # # ]: 0 : ereport(ERROR,
 961 : : (errcode(ERRCODE_OUT_OF_MEMORY),
 962 : : errmsg("out of shared memory"),
 963 : : errhint("You might need to increase %s.", "max_locks_per_transaction")));
 964 : : else
 965 : 0 : return LOCKACQUIRE_NOT_AVAIL;
 966 : : }
 967 : : }
 968 : :
 969 : : /*
 970 : : * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
 971 : : * take it via the fast-path, either, so we've got to mess with the shared
 972 : : * lock table.
 973 : : */
6475 tgl@sss.pgh.pa.us 974 :CBC 2005356 : partitionLock = LockHashPartitionLock(hashcode);
 975 : :
6699 976 : 2005356 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
 977 : :
 978 : : /*
 979 : : * Find or create lock and proclock entries with this tag
 980 : : *
 981 : : * Note: if the locallock object already existed, it might have a pointer
 982 : : * to the lock already ... but we should not assume that that pointer is
 983 : : * valid, since a lock object with zero hold and request counts can go
 984 : : * away anytime. So we have to use SetupLockInTable() to recompute the
 985 : : * lock and proclock pointers, even if they're already set.
 986 : : */
4705 rhaas@postgresql.org 987 : 2005356 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
 988 : : hashcode, lockmode);
 989 [ - + ]: 2005356 : if (!proclock)
 990 : : {
4379 rhaas@postgresql.org 991 :UBC 0 : AbortStrongLockAcquire();
4705 992 : 0 : LWLockRelease(partitionLock);
2046 tgl@sss.pgh.pa.us 993 [ # # ]: 0 : if (locallock->nLocks == 0)
 994 : 0 : RemoveLocalLock(locallock);
 995 [ # # ]: 0 : if (locallockp)
 996 : 0 : *locallockp = NULL;
4705 rhaas@postgresql.org 997 [ # # ]: 0 : if (reportMemoryError)
 998 [ # # ]: 0 : ereport(ERROR,
 999 : : (errcode(ERRCODE_OUT_OF_MEMORY),
 1000 : : errmsg("out of shared memory"),
 1001 : : errhint("You might need to increase %s.", "max_locks_per_transaction")));
 1002 : : else
 1003 : 0 : return LOCKACQUIRE_NOT_AVAIL;
 1004 : : }
4705 rhaas@postgresql.org 1005 :CBC 2005356 : locallock->proclock = proclock;
 1006 : 2005356 : lock = proclock->tag.myLock;
 1007 : 2005356 : locallock->lock = lock;
 1008 : :
 1009 : : /*
 1010 : : * If lock requested conflicts with locks requested by waiters, must join
 1011 : : * wait queue. Otherwise, check for conflict with already-held locks.
 1012 : : * (That's last because it's the most complex check.)
 1013 : : */
 1014 [ + + ]: 2005356 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1568 peter@eisentraut.org 1015 : 59 : found_conflict = true;
 1016 : : else
 1017 : 2005297 : found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
 1018 : : lock, proclock);
 1019 : :
 1020 [ + + ]: 2005356 : if (!found_conflict)
 1021 : : {
 1022 : : /* No conflict with held or previously requested locks */
4705 rhaas@postgresql.org 1023 : 2003594 : GrantLock(lock, proclock, lockmode);
 1024 : 2003594 : GrantLockLocal(locallock, owner);
 1025 : : }
 1026 : : else
 1027 : : {
 1028 : : /*
 1029 : : * Set bitmask of locks this process already holds on this object.
 1030 : : */
 1031 : 1762 : MyProc->heldLocks = proclock->holdMask;
 1032 : :
 1033 : : /*
 1034 : : * Sleep till someone wakes me up. We do this even in the dontWait
 1035 : : * case, because while trying to go to sleep, we may discover that we
 1036 : : * can acquire the lock immediately after all.
 1037 : : */
 1038 : :
 1039 : : TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
 1040 : : locktag->locktag_field2,
 1041 : : locktag->locktag_field3,
 1042 : : locktag->locktag_field4,
 1043 : : locktag->locktag_type,
 1044 : : lockmode);
 1045 : :
 31 rhaas@postgresql.org 1046 :GNC 1762 : WaitOnLock(locallock, owner, dontWait);
 1047 : :
 1048 : : TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
 1049 : : locktag->locktag_field2,
 1050 : : locktag->locktag_field3,
 1051 : : locktag->locktag_field4,
 1052 : : locktag->locktag_type,
 1053 : : lockmode);
 1054 : :
 1055 : : /*
 1056 : : * NOTE: do not do any material change of state between here and
 1057 : : * return. All required changes in locktable state must have been
 1058 : : * done when the lock was granted to us --- see notes in WaitOnLock.
 1059 : : */
 1060 : :
 1061 : : /*
 1062 : : * Check the proclock entry status. If dontWait = true, this is an
 1063 : : * expected case; otherwise, it will only happen if something in the
 1064 : : * ipc communication doesn't work correctly.
 1065 : : */
4705 rhaas@postgresql.org 1066 [ + + ]:CBC 1718 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
 1067 : : {
4379 rhaas@postgresql.org 1068 :GBC 652 : AbortStrongLockAcquire();
 1069 : :
 31 rhaas@postgresql.org 1070 [ + - ]:GNC 652 : if (dontWait)
 1071 : : {
 1072 : : /*
 1073 : : * We can't acquire the lock immediately. If caller specified
 1074 : : * no blocking, remove useless table entries and return
 1075 : : * LOCKACQUIRE_NOT_AVAIL without waiting.
 1076 : : */
 1077 [ + + ]: 652 : if (proclock->holdMask == 0)
 1078 : : {
 1079 : : uint32 proclock_hashcode;
 1080 : :
 1081 : 448 : proclock_hashcode = ProcLockHashCode(&proclock->tag,
 1082 : : hashcode);
 1083 : 448 : dlist_delete(&proclock->lockLink);
 1084 : 448 : dlist_delete(&proclock->procLink);
 1085 [ - + ]: 448 : if (!hash_search_with_hash_value(LockMethodProcLockHash,
 1086 : 448 : &(proclock->tag),
 1087 : : proclock_hashcode,
 1088 : : HASH_REMOVE,
 1089 : : NULL))
 31 rhaas@postgresql.org 1090 [ # # ]:UNC 0 : elog(PANIC, "proclock table corrupted");
 1091 : : }
 1092 : : else
 1093 : : PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
 31 rhaas@postgresql.org 1094 :GNC 652 : lock->nRequested--;
 1095 : 652 : lock->requested[lockmode]--;
 1096 : : LOCK_PRINT("LockAcquire: conditional lock failed",
 1097 : : lock, lockmode);
 1098 [ + - - + ]: 652 : Assert((lock->nRequested > 0) &&
 1099 : : (lock->requested[lockmode] >= 0));
 1100 [ - + ]: 652 : Assert(lock->nGranted <= lock->nRequested);
 1101 : 652 : LWLockRelease(partitionLock);
 1102 [ + - ]: 652 : if (locallock->nLocks == 0)
 1103 : 652 : RemoveLocalLock(locallock);
 1104 [ + + ]: 652 : if (locallockp)
 1105 : 222 : *locallockp = NULL;
 1106 : 652 : return LOCKACQUIRE_NOT_AVAIL;
 1107 : : }
 1108 : : else
 1109 : : {
 1110 : : /*
 1111 : : * We should have gotten the lock, but somehow that didn't
 1112 : : * happen. If we get here, it's a bug.
 1113 : : */
 1114 : : PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
 1115 : : LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
 31 rhaas@postgresql.org 1116 :UNC 0 : LWLockRelease(partitionLock);
 1117 [ # # ]: 0 : elog(ERROR, "LockAcquire failed");
 1118 : : }
 1119 : : }
 1120 : : PROCLOCK_PRINT("LockAcquire: granted", proclock);
 1121 : : LOCK_PRINT("LockAcquire: granted", lock, lockmode);
 1122 : : }
 1123 : :
 1124 : : /*
 1125 : : * Lock state is fully up-to-date now; if we error out after this, no
 1126 : : * special error cleanup is required.
 1127 : : */
4379 rhaas@postgresql.org 1128 :CBC 2004660 : FinishStrongLockAcquire();
 1129 : :
4705 1130 : 2004660 : LWLockRelease(partitionLock);
 1131 : :
 1132 : : /*
 1133 : : * Emit a WAL record if acquisition of this lock needs to be replayed in a
 1134 : : * standby server.
 1135 : : */
 1136 [ + + ]: 2004660 : if (log_lock)
 1137 : : {
 1138 : : /*
 1139 : : * Decode the locktag back to the original values, to avoid sending
 1140 : : * lots of empty bytes with every message. See lock.h to check how a
 1141 : : * locktag is defined for LOCKTAG_RELATION
 1142 : : */
 1143 : 81184 : LogAccessExclusiveLock(locktag->locktag_field1,
 1144 : 81184 : locktag->locktag_field2);
 1145 : : }
 1146 : :
 1147 : 2004660 : return LOCKACQUIRE_OK;
 1148 : : }
1149 : :
1150 : : /*
1151 : : * Find or create LOCK and PROCLOCK objects as needed for a new lock
1152 : : * request.
1153 : : *
1154 : : * Returns the PROCLOCK object, or NULL if we failed to create the objects
1155 : : * for lack of shared memory.
1156 : : *
1157 : : * The appropriate partition lock must be held at entry, and will be
1158 : : * held at exit.
1159 : : */
 1160 : : static PROCLOCK *
 1161 : 2006992 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
 1162 : : const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 1163 : : {
 1164 : : LOCK *lock;
 1165 : : PROCLOCK *proclock;
 1166 : : PROCLOCKTAG proclocktag;
 1167 : : uint32 proclock_hashcode;
 1168 : : bool found;
 1169 : :
 1170 : : /*
 1171 : : * Find or create a lock with this tag.
 1172 : : */
6475 tgl@sss.pgh.pa.us 1173 : 2006992 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
 1174 : : locktag,
 1175 : : hashcode,
 1176 : : HASH_ENTER_NULL,
 1177 : : &found);
9716 bruce@momjian.us 1178 [ - + ]: 2006992 : if (!lock)
4705 rhaas@postgresql.org 1179 :UBC 0 : return NULL;
 1180 : :
 1181 : : /*
 1182 : : * if it's a new lock object, initialize it
 1183 : : */
9716 bruce@momjian.us 1184 [ + + ]:CBC 2006992 : if (!found)
 1185 : : {
8489 tgl@sss.pgh.pa.us 1186 : 1767948 : lock->grantMask = 0;
 1187 : 1767948 : lock->waitMask = 0;
 452 andres@anarazel.de 1188 : 1767948 : dlist_init(&lock->procLocks);
 1189 : 1767948 : dclist_init(&lock->waitProcs);
8489 tgl@sss.pgh.pa.us 1190 : 1767948 : lock->nRequested = 0;
 1191 : 1767948 : lock->nGranted = 0;
6913 neilc@samurai.com 1192 [ + - + - : 10607688 : MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
 + - + - +
 + ]
 1193 [ - + - - : 1767948 : MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
 - - - - -
 - ]
 1194 : : LOCK_PRINT("LockAcquire: new", lock, lockmode);
 1195 : : }
 1196 : : else
 1197 : : {
 1198 : : LOCK_PRINT("LockAcquire: found", lock, lockmode);
8489 tgl@sss.pgh.pa.us 1199 [ + - - + ]: 239044 : Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
 1200 [ + - - + ]: 239044 : Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
 1201 [ - + ]: 239044 : Assert(lock->nGranted <= lock->nRequested);
 1202 : : }
 1203 : :
 1204 : : /*
 1205 : : * Create the hash key for the proclock table.
 1206 : : */
6475 1207 : 2006992 : proclocktag.myLock = lock;
4705 rhaas@postgresql.org 1208 : 2006992 : proclocktag.myProc = proc;
 1209 : :
6475 tgl@sss.pgh.pa.us 1210 : 2006992 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
 1211 : :
 1212 : : /*
 1213 : : * Find or create a proclock entry with this tag
 1214 : : */
 1215 : 2006992 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
 1216 : : &proclocktag,
 1217 : : proclock_hashcode,
 1218 : : HASH_ENTER_NULL,
 1219 : : &found);
7726 bruce@momjian.us 1220 [ - + ]: 2006992 : if (!proclock)
 1221 : : {
 1222 : : /* Oops, not enough shmem for the proclock */
7154 tgl@sss.pgh.pa.us 1223 [ # # ]:UBC 0 : if (lock->nRequested == 0)
 1224 : : {
 1225 : : /*
 1226 : : * There are no other requestors of this lock, so garbage-collect
 1227 : : * the lock object. We *must* do this to avoid a permanent leak
 1228 : : * of shared memory, because there won't be anything to cause
 1229 : : * anyone to release the lock object later.
 1230 : : */
 452 andres@anarazel.de 1231 [ # # ]: 0 : Assert(dlist_is_empty(&(lock->procLocks)));
6475 tgl@sss.pgh.pa.us 1232 [ # # ]: 0 : if (!hash_search_with_hash_value(LockMethodLockHash,
 433 peter@eisentraut.org 1233 : 0 : &(lock->tag),
 1234 : : hashcode,
 1235 : : HASH_REMOVE,
 1236 : : NULL))
6905 tgl@sss.pgh.pa.us 1237 [ # # ]: 0 : elog(PANIC, "lock table corrupted");
 1238 : : }
4705 rhaas@postgresql.org 1239 : 0 : return NULL;
 1240 : : }
 1241 : :
 1242 : : /*
 1243 : : * If new, initialize the new entry
 1244 : : */
9716 bruce@momjian.us 1245 [ + + ]:CBC 2006992 : if (!found)
 1246 : : {
4705 rhaas@postgresql.org 1247 : 1805018 : uint32 partition = LockHashPartition(hashcode);
 1248 : :
 1249 : : /*
 1250 : : * It might seem unsafe to access proclock->groupLeader without a
 1251 : : * lock, but it's not really. Either we are initializing a proclock
 1252 : : * on our own behalf, in which case our group leader isn't changing
 1253 : : * because the group leader for a process can only ever be changed by
 1254 : : * the process itself; or else we are transferring a fast-path lock to
 1255 : : * the main lock table, in which case that process can't change its
 1256 : : * lock group leader without first releasing all of its locks (and in
 1257 : : * particular the one we are currently transferring).
 1258 : : */
2989 1259 : 3610036 : proclock->groupLeader = proc->lockGroupLeader != NULL ?
 1260 [ + + ]: 1805018 : proc->lockGroupLeader : proc;
7170 tgl@sss.pgh.pa.us 1261 : 1805018 : proclock->holdMask = 0;
6879 1262 : 1805018 : proclock->releaseMask = 0;
 1263 : : /* Add proclock to appropriate lists */
 452 andres@anarazel.de 1264 : 1805018 : dlist_push_tail(&lock->procLocks, &proclock->lockLink);
 1265 : 1805018 : dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
 1266 : : PROCLOCK_PRINT("LockAcquire: new", proclock);
 1267 : : }
 1268 : : else
 1269 : : {
 1270 : : PROCLOCK_PRINT("LockAcquire: found", proclock);
7170 tgl@sss.pgh.pa.us 1271 [ - + ]: 201974 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
 1272 : :
 1273 : : #ifdef CHECK_DEADLOCK_RISK
 1274 : :
 1275 : : /*
 1276 : : * Issue warning if we already hold a lower-level lock on this object
 1277 : : * and do not hold a lock of the requested level or higher. This
 1278 : : * indicates a deadlock-prone coding practice (eg, we'd have a
 1279 : : * deadlock if another backend were following the same code path at
 1280 : : * about the same time).
 1281 : : *
 1282 : : * This is not enabled by default, because it may generate log entries
 1283 : : * about user-level coding practices that are in fact safe in context.
 1284 : : * It can be enabled to help find system-level problems.
 1285 : : *
 1286 : : * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
 1287 : : * better to use a table. For now, though, this works.
 1288 : : */
 1289 : : {
 1290 : : int i;
 1291 : :
 1292 : : for (i = lockMethodTable->numLockModes; i > 0; i--)
 1293 : : {
 1294 : : if (proclock->holdMask & LOCKBIT_ON(i))
 1295 : : {
 1296 : : if (i >= (int) lockmode)
 1297 : : break; /* safe: we have a lock >= req level */
 1298 : : elog(LOG, "deadlock risk: raising lock level"
 1299 : : " from %s to %s on object %u/%u/%u",
 1300 : : lockMethodTable->lockModeNames[i],
 1301 : : lockMethodTable->lockModeNames[lockmode],
 1302 : : lock->tag.locktag_field1, lock->tag.locktag_field2,
 1303 : : lock->tag.locktag_field3);
 1304 : : break;
 1305 : : }
 1306 : : }
 1307 : : }
 1308 : : #endif /* CHECK_DEADLOCK_RISK */
 1309 : : }
 1310 : :
 1311 : : /*
 1312 : : * lock->nRequested and lock->requested[] count the total number of
 1313 : : * requests, whether granted or waiting, so increment those immediately.
 1314 : : * The other counts don't increment till we get the lock.
 1315 : : */
8489 1316 : 2006992 : lock->nRequested++;
 1317 : 2006992 : lock->requested[lockmode]++;
 1318 [ + - - + ]: 2006992 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
 1319 : :
 1320 : : /*
 1321 : : * We shouldn't already hold the desired lock; else locallock table is
 1322 : : * broken.
 1323 : : */
6879 1324 [ - + ]: 2006992 : if (proclock->holdMask & LOCKBIT_ON(lockmode))
6879 tgl@sss.pgh.pa.us 1325 [ # # ]:UBC 0 : elog(ERROR, "lock %s on object %u/%u/%u is already held",
 1326 : : lockMethodTable->lockModeNames[lockmode],
 1327 : : lock->tag.locktag_field1, lock->tag.locktag_field2,
 1328 : : lock->tag.locktag_field3);
 1329 : :
4705 rhaas@postgresql.org 1330 :CBC 2006992 : return proclock;
 1331 : : }
1332 : :
 1333 : : /*
 1334 : : * Check and set/reset the flag that we hold the relation extension lock.
 1335 : : *
 1336 : : * It is the caller's responsibility to call this function after
 1337 : : * acquiring/releasing the relation extension lock.
 1338 : : *
 1339 : : * Pass acquired as true if lock is acquired, false otherwise.
 1340 : : */
 1341 : : static inline void
1488 akapila@postgresql.o 1342 : 31367105 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
 1343 : : {
 1344 : : #ifdef USE_ASSERT_CHECKING
 1345 [ + + ]: 31367105 : if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
 1346 : 280552 : IsRelationExtensionLockHeld = acquired;
 1347 : : #endif
 1348 : 31367105 : }
1349 : :
 1350 : : /*
 1351 : : * Subroutine to free a locallock entry
 1352 : : */
 1353 : : static void
7170 tgl@sss.pgh.pa.us 1354 : 15450861 : RemoveLocalLock(LOCALLOCK *locallock)
 1355 : : {
 1356 : : int i;
 1357 : :
4315 heikki.linnakangas@i 1358 [ + + ]: 15519886 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
 1359 : : {
 1360 [ + + ]: 69025 : if (locallock->lockOwners[i].owner != NULL)
 1361 : 68988 : ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
 1362 : : }
3129 tgl@sss.pgh.pa.us 1363 : 15450861 : locallock->numLockOwners = 0;
 1364 [ + - ]: 15450861 : if (locallock->lockOwners != NULL)
 1365 : 15450861 : pfree(locallock->lockOwners);
7170 1366 : 15450861 : locallock->lockOwners = NULL;
 1367 : :
4705 rhaas@postgresql.org 1368 [ + + ]: 15450861 : if (locallock->holdsStrongLockCount)
 1369 : : {
 1370 : : uint32 fasthashcode;
 1371 : :
 1372 : 156081 : fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
 1373 : :
4653 1374 [ + + ]: 156081 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
 1375 [ - + ]: 156081 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
 1376 : 156081 : FastPathStrongRelationLocks->count[fasthashcode]--;
2433 peter_e@gmx.net 1377 : 156081 : locallock->holdsStrongLockCount = false;
4653 rhaas@postgresql.org 1378 : 156081 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
 1379 : : }
 1380 : :
6701 tgl@sss.pgh.pa.us 1381 [ - + ]: 15450861 : if (!hash_search(LockMethodLocalHash,
 433 peter@eisentraut.org 1382 : 15450861 : &(locallock->tag),
 1383 : : HASH_REMOVE, NULL))
7170 tgl@sss.pgh.pa.us 1384 [ # # ]:UBC 0 : elog(WARNING, "locallock table corrupted");
 1385 : :
 1386 : : /*
 1387 : : * Indicate that the lock is released for certain types of locks
 1388 : : * (currently only the relation extension lock is tracked this way).
 1389 : : */
1488 akapila@postgresql.o 1389 :CBC 15450861 : CheckAndSetLockHeld(locallock, false);
7170 tgl@sss.pgh.pa.us 1390 : 15450861 : }
1391 : :
 1392 : : /*
 1393 : : * LockCheckConflicts -- test whether requested lock conflicts
 1394 : : * with those already granted
 1395 : : *
 1396 : : * Returns true if conflict, false if no conflict.
 1397 : : *
 1398 : : * NOTES:
 1399 : : * Here's what makes this complicated: one process's locks don't
 1400 : : * conflict with one another, no matter what purpose they are held for
 1401 : : * (eg, session and transaction locks do not conflict). Nor do the locks
 1402 : : * of one process in a lock group conflict with those of another process in
 1403 : : * the same group. So, we must subtract off these locks when determining
 1404 : : * whether the requested new lock conflicts with those already held.
 1405 : : */
 1406 : : bool
7440 bruce@momjian.us 1407 : 2006438 : LockCheckConflicts(LockMethod lockMethodTable,
 1408 : : LOCKMODE lockmode,
 1409 : : LOCK *lock,
 1410 : : PROCLOCK *proclock)
 1411 : : {
7941 1412 : 2006438 : int numLockModes = lockMethodTable->numLockModes;
 1413 : : LOCKMASK myLocks;
2989 rhaas@postgresql.org 1414 : 2006438 : int conflictMask = lockMethodTable->conflictTab[lockmode];
 1415 : : int conflictsRemaining[MAX_LOCKMODES];
 1416 : 2006438 : int totalConflictsRemaining = 0;
 1417 : : dlist_iter proclock_iter;
 1418 : : int i;
 1419 : :
 1420 : : /*
 1421 : : * first check for global conflicts: If no locks conflict with my request,
 1422 : : * then I get the lock.
 1423 : : *
 1424 : : * Checking for conflict: lock->grantMask represents the types of
 1425 : : * currently held locks. conflictTable[lockmode] has a bit set for each
 1426 : : * type of lock that conflicts with request. Bitwise compare tells if
 1427 : : * there is a conflict.
 1428 : : */
 1429 [ + + ]: 2006438 : if (!(conflictMask & lock->grantMask))
 1430 : : {
 1431 : : PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1568 peter@eisentraut.org 1432 : 1946080 : return false;
 1433 : : }
 1434 : :
 1435 : : /*
 1436 : : * Rats. Something conflicts. But it could still be my own lock, or a
 1437 : : * lock held by another member of my locking group. First, figure out how
 1438 : : * many conflicts remain after subtracting out any locks I hold myself.
 1439 : : */
6879 tgl@sss.pgh.pa.us 1440 : 60358 : myLocks = proclock->holdMask;
7440 bruce@momjian.us 1441 [ + + ]: 543222 : for (i = 1; i <= numLockModes; i++)
 1442 : : {
2989 rhaas@postgresql.org 1443 [ + + ]: 482864 : if ((conflictMask & LOCKBIT_ON(i)) == 0)
 1444 : : {
 1445 : 243151 : conflictsRemaining[i] = 0;
 1446 : 243151 : continue;
 1447 : : }
 1448 : 239713 : conflictsRemaining[i] = lock->granted[i];
 1449 [ + + ]: 239713 : if (myLocks & LOCKBIT_ON(i))
 1450 : 62558 : --conflictsRemaining[i];
 1451 : 239713 : totalConflictsRemaining += conflictsRemaining[i];
 1452 : : }
 1453 : :
 1454 : : /* If no conflicts remain, we get the lock. */
 1455 [ + + ]: 60358 : if (totalConflictsRemaining == 0)
 1456 : : {
 1457 : : PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1568 peter@eisentraut.org 1458 : 58136 : return false;
 1459 : : }
 1460 : :
 1461 : : /* If no group locking, it's definitely a conflict. */
2989 rhaas@postgresql.org 1462 [ + + + - ]: 2222 : if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
 1463 : : {
 1464 [ - + ]: 1701 : Assert(proclock->tag.myProc == MyProc);
 1465 : : PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
 1466 : : proclock);
1568 peter@eisentraut.org 1467 : 1701 : return true;
 1468 : : }
 1469 : :
 1470 : : /*
 1471 : : * The relation extension lock conflicts even between members of the same
 1472 : : * lock group.
 1473 : : */
 283 akapila@postgresql.o 1473 [ + + ]: 521 : if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
 1474 : : {
 1475 : : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
 1476 : : proclock);
1486 1477 : 34 : return true;
 1478 : : }
 1479 : :
 1480 : : /*
 1481 : : * Locks held in conflicting modes by members of our own lock group are
 1482 : : * not real conflicts; we can subtract those out and see if we still have
 1483 : : * a conflict. This is O(N) in the number of processes holding or
 1484 : : * awaiting locks on this object. We could improve that by making the
 1485 : : * shared memory state more complex (and larger) but it doesn't seem worth
 1486 : : * it.
 1487 : : */
 452 andres@anarazel.de 1488 [ + - + + ]: 708 : dlist_foreach(proclock_iter, &lock->procLocks)
 1489 : : {
 1490 : 667 : PROCLOCK *otherproclock =
 1491 : 667 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
 1492 : :
2989 rhaas@postgresql.org 1493 [ + + ]: 667 : if (proclock != otherproclock &&
 1494 [ + + ]: 626 : proclock->groupLeader == otherproclock->groupLeader &&
 1495 [ + + ]: 454 : (otherproclock->holdMask & conflictMask) != 0)
 1496 : : {
2866 1497 : 452 : int intersectMask = otherproclock->holdMask & conflictMask;
 1498 : :
2989 1499 [ + + ]: 4068 : for (i = 1; i <= numLockModes; i++)
 1500 : : {
 1501 [ + + ]: 3616 : if ((intersectMask & LOCKBIT_ON(i)) != 0)
 1502 : : {
 1503 [ - + ]: 459 : if (conflictsRemaining[i] <= 0)
2989 rhaas@postgresql.org 1504 [ # # ]:UBC 0 : elog(PANIC, "proclocks held do not match lock");
2989 rhaas@postgresql.org 1505 :CBC 459 : conflictsRemaining[i]--;
 1506 : 459 : totalConflictsRemaining--;
 1507 : : }
 1508 : : }
 1509 : :
 1510 [ + + ]: 452 : if (totalConflictsRemaining == 0)
 1511 : : {
 1512 : : PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
 1513 : : proclock);
1568 peter@eisentraut.org 1514 : 446 : return false;
 1515 : : }
 1516 : : }
 1517 : : }
 1518 : :
 1519 : : /* Nope, it's a real conflict. */
 1520 : : PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
 1521 : 41 : return true;
 1522 : : }
1523 : :
 1524 : : /*
 1525 : : * GrantLock -- update the lock and proclock data structures to show
 1526 : : * the lock request has been granted.
 1527 : : *
 1528 : : * NOTE: if proc was blocked, it also needs to be removed from the wait list
 1529 : : * and have its waitLock/waitProcLock fields cleared. That's not done here.
 1530 : : *
 1531 : : * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
 1532 : : * table entry; but since we may be waking some other process, we can't do
 1533 : : * that here; it's done by GrantLockLocal, instead.
 1534 : : */
 1535 : : void
7726 bruce@momjian.us 1536 : 2006377 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 1537 : : {
8489 tgl@sss.pgh.pa.us 1538 : 2006377 : lock->nGranted++;
 1539 : 2006377 : lock->granted[lockmode]++;
7440 bruce@momjian.us 1540 : 2006377 : lock->grantMask |= LOCKBIT_ON(lockmode);
8489 tgl@sss.pgh.pa.us 1541 [ + + ]: 2006377 : if (lock->granted[lockmode] == lock->requested[lockmode])
7440 bruce@momjian.us 1542 : 2006192 : lock->waitMask &= LOCKBIT_OFF(lockmode);
7170 tgl@sss.pgh.pa.us 1543 : 2006377 : proclock->holdMask |= LOCKBIT_ON(lockmode);
 1544 : : LOCK_PRINT("GrantLock", lock, lockmode);
8489 1545 [ + - - + ]: 2006377 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
 1546 [ - + ]: 2006377 : Assert(lock->nGranted <= lock->nRequested);
7170 1547 : 2006377 : }
1548 : :
1549 : : /*
1550 : : * UnGrantLock -- opposite of GrantLock.
1551 : : *
1552 : : * Updates the lock and proclock data structures to show that the lock
1553 : : * is no longer held nor requested by the current holder.
1554 : : *
1555 : : * Returns true if there were any waiters waiting on the lock that
1556 : : * should now be woken up with ProcLockWakeup.
1557 : : */
1558 : : static bool
7009 neilc@samurai.com 1559 : 2006283 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1560 : : PROCLOCK *proclock, LockMethod lockMethodTable)
1561 : : {
6756 bruce@momjian.us 1562 : 2006283 : bool wakeupNeeded = false;
1563 : :
7009 neilc@samurai.com 1564 [ + - - + ]: 2006283 : Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1565 [ + - - + ]: 2006283 : Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1566 [ - + ]: 2006283 : Assert(lock->nGranted <= lock->nRequested);
1567 : :
1568 : : /*
1569 : : * fix the general lock stats
1570 : : */
1571 : 2006283 : lock->nRequested--;
1572 : 2006283 : lock->requested[lockmode]--;
1573 : 2006283 : lock->nGranted--;
1574 : 2006283 : lock->granted[lockmode]--;
1575 : :
1576 [ + + ]: 2006283 : if (lock->granted[lockmode] == 0)
1577 : : {
1578 : : /* change the conflict mask. No more of this lock type. */
1579 : 1973665 : lock->grantMask &= LOCKBIT_OFF(lockmode);
1580 : : }
1581 : :
1582 : : LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1583 : :
1584 : : /*
1585 : : * We need only run ProcLockWakeup if the released lock conflicts with at
1586 : : * least one of the lock types requested by waiter(s). Otherwise whatever
1587 : : * conflict made them wait must still exist. NOTE: before MVCC, we could
1588 : : * skip wakeup if lock->granted[lockmode] was still positive. But that's
1589 : : * not true anymore, because the remaining granted locks might belong to
1590 : : * some waiter, who could now be awakened because he doesn't conflict with
1591 : : * his own locks.
1592 : : */
1593 [ + + ]: 2006283 : if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1594 : 1017 : wakeupNeeded = true;
1595 : :
1596 : : /*
1597 : : * Now fix the per-proclock state.
1598 : : */
1599 : 2006283 : proclock->holdMask &= LOCKBIT_OFF(lockmode);
1600 : : PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1601 : :
1602 : 2006283 : return wakeupNeeded;
1603 : : }
1604 : :
/*
 * CleanUpLock -- clean up after releasing a lock.  We garbage-collect the
 * proclock and lock objects if possible, and call ProcLockWakeup if there
 * are remaining requests and the caller says it's OK.  (Normally, this
 * should be called after UnGrantLock, and wakeupNeeded is the result from
 * UnGrantLock.)
 *
 * The appropriate partition lock must be held at entry, and will be
 * held at exit.
 */
static void
CleanUpLock(LOCK *lock, PROCLOCK *proclock,
			LockMethod lockMethodTable, uint32 hashcode,
			bool wakeupNeeded)
{
	/*
	 * If this was my last hold on this lock, delete my entry in the proclock
	 * table.
	 */
	if (proclock->holdMask == 0)
	{
		uint32		proclock_hashcode;

		PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
		/* Unlink from both the lock's and the owning proc's proclock lists. */
		dlist_delete(&proclock->lockLink);
		dlist_delete(&proclock->procLink);
		/* The proclock hash value is derived from the lock's hashcode. */
		proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
		if (!hash_search_with_hash_value(LockMethodProcLockHash,
										 &(proclock->tag),
										 proclock_hashcode,
										 HASH_REMOVE,
										 NULL))
			elog(PANIC, "proclock table corrupted");
	}

	if (lock->nRequested == 0)
	{
		/*
		 * The caller just released the last lock, so garbage-collect the lock
		 * object.
		 */
		LOCK_PRINT("CleanUpLock: deleting", lock, 0);
		/* No proclocks may remain once all requests are gone. */
		Assert(dlist_is_empty(&lock->procLocks));
		if (!hash_search_with_hash_value(LockMethodLockHash,
										 &(lock->tag),
										 hashcode,
										 HASH_REMOVE,
										 NULL))
			elog(PANIC, "lock table corrupted");
	}
	else if (wakeupNeeded)
	{
		/* There are waiters on this lock, so wake them up. */
		ProcLockWakeup(lockMethodTable, lock);
	}
}
1661 : :
1662 : : /*
1663 : : * GrantLockLocal -- update the locallock data structures to show
1664 : : * the lock request has been granted.
1665 : : *
1666 : : * We expect that LockAcquire made sure there is room to add a new
1667 : : * ResourceOwner entry.
1668 : : */
1669 : : static void
7170 1670 : 17131484 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1671 : : {
1672 : 17131484 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1673 : : int i;
1674 : :
1675 [ - + ]: 17131484 : Assert(locallock->numLockOwners < locallock->maxLockOwners);
1676 : : /* Count the total */
1677 : 17131484 : locallock->nLocks++;
1678 : : /* Count the per-owner lock */
1679 [ + + ]: 17762185 : for (i = 0; i < locallock->numLockOwners; i++)
1680 : : {
1681 [ + + ]: 1845941 : if (lockOwners[i].owner == owner)
1682 : : {
1683 : 1215240 : lockOwners[i].nLocks++;
1684 : 1215240 : return;
1685 : : }
1686 : : }
1687 : 15916244 : lockOwners[i].owner = owner;
1688 : 15916244 : lockOwners[i].nLocks = 1;
1689 : 15916244 : locallock->numLockOwners++;
4315 heikki.linnakangas@i 1690 [ + + ]: 15916244 : if (owner != NULL)
1691 : 15809097 : ResourceOwnerRememberLock(owner, locallock);
1692 : :
1693 : : /* Indicate that the lock is acquired for certain types of locks. */
1488 akapila@postgresql.o 1694 : 15916244 : CheckAndSetLockHeld(locallock, true);
1695 : : }
1696 : :
/*
 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
 * and arrange for error cleanup if it fails
 */
static void
BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
{
	/* Only one strong-lock acquisition can be in progress at a time. */
	Assert(StrongLockInProgress == NULL);
	Assert(locallock->holdsStrongLockCount == false);

	/*
	 * Adding to a memory location is not atomic, so we take a spinlock to
	 * ensure we don't collide with someone else trying to bump the count at
	 * the same time.
	 *
	 * XXX: It might be worth considering using an atomic fetch-and-add
	 * instruction here, on architectures where that is supported.
	 */

	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
	FastPathStrongRelationLocks->count[fasthashcode]++;
	locallock->holdsStrongLockCount = true;
	/* Record in-progress acquisition so AbortStrongLockAcquire can undo it. */
	StrongLockInProgress = locallock;
	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
1722 : :
/*
 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
 * acquisition once it's no longer needed
 */
static void
FinishStrongLockAcquire(void)
{
	/*
	 * The strong-lock count bump made by BeginStrongLockAcquire is now
	 * settled; clearing this means AbortStrongLockAcquire won't undo it.
	 */
	StrongLockInProgress = NULL;
}
1732 : :
/*
 * AbortStrongLockAcquire - undo strong lock state changes performed by
 * BeginStrongLockAcquire.
 */
void
AbortStrongLockAcquire(void)
{
	uint32		fasthashcode;
	LOCALLOCK  *locallock = StrongLockInProgress;

	/* Nothing to do unless a strong-lock acquisition is pending. */
	if (locallock == NULL)
		return;

	fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
	Assert(locallock->holdsStrongLockCount == true);
	/* Reverse the count bump under the same spinlock used to make it. */
	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
	Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
	FastPathStrongRelationLocks->count[fasthashcode]--;
	locallock->holdsStrongLockCount = false;
	StrongLockInProgress = NULL;
	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
1755 : :
/*
 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
 *		WaitOnLock on.
 *
 * proc.c needs this for the case where we are booted off the lock by
 * timeout, but discover that someone granted us the lock anyway.
 *
 * We could just export GrantLockLocal, but that would require including
 * resowner.h in lock.h, which creates circularity.
 */
void
GrantAwaitedLock(void)
{
	/* awaitedLock/awaitedOwner are the file-statics set up by WaitOnLock. */
	GrantLockLocal(awaitedLock, awaitedOwner);
}
1771 : :
/*
 * MarkLockClear -- mark an acquired lock as "clear"
 *
 * This means that we know we have absorbed all sinval messages that other
 * sessions generated before we acquired this lock, and so we can confidently
 * assume we know about any catalog changes protected by this lock.
 */
void
MarkLockClear(LOCALLOCK *locallock)
{
	/* Only a lock we actually hold can be marked clear. */
	Assert(locallock->nLocks > 0);
	locallock->lockCleared = true;
}
1785 : :
/*
 * WaitOnLock -- wait to acquire a lock
 *
 * Caller must have set MyProc->heldLocks to reflect locks already held
 * on the lockable object by this process.
 *
 * The appropriate partition lock must be held at entry, and will still be
 * held at exit.
 *
 * If dontWait is true, ProcSleep is asked not to block; the caller inspects
 * the resulting lock state to see what happened.
 */
static void
WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool dontWait)
{
	LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
	LockMethod	lockMethodTable = LockMethods[lockmethodid];

	LOCK_PRINT("WaitOnLock: sleeping on lock",
			   locallock->lock, locallock->tag.mode);

	/* adjust the process title to indicate that it's waiting */
	set_ps_display_suffix("waiting");

	/* Expose the awaited lock so GrantAwaitedLock/LockErrorCleanup see it. */
	awaitedLock = locallock;
	awaitedOwner = owner;

	/*
	 * NOTE: Think not to put any shared-state cleanup after the call to
	 * ProcSleep, in either the normal or failure path.  The lock state must
	 * be fully set by the lock grantor, or by CheckDeadLock if we give up
	 * waiting for the lock.  This is necessary because of the possibility
	 * that a cancel/die interrupt will interrupt ProcSleep after someone else
	 * grants us the lock, but before we've noticed it. Hence, after granting,
	 * the locktable state must fully reflect the fact that we own the lock;
	 * we can't do additional work on return.
	 *
	 * We can and do use a PG_TRY block to try to clean up after failure, but
	 * this still has a major limitation: elog(FATAL) can occur while waiting
	 * (eg, a "die" interrupt), and then control won't come back here. So all
	 * cleanup of essential state should happen in LockErrorCleanup, not here.
	 * We can use PG_TRY to clear the "waiting" status flags, since doing that
	 * is unimportant if the process exits.
	 */
	PG_TRY();
	{
		/*
		 * If dontWait = true, we handle success and failure in the same way
		 * here. The caller will be able to sort out what has happened.
		 */
		if (ProcSleep(locallock, lockMethodTable, dontWait) != PROC_WAIT_STATUS_OK
			&& !dontWait)
		{

			/*
			 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
			 * now.
			 */
			awaitedLock = NULL;
			LOCK_PRINT("WaitOnLock: aborting on lock",
					   locallock->lock, locallock->tag.mode);
			LWLockRelease(LockHashPartitionLock(locallock->hashcode));

			/*
			 * Now that we aren't holding the partition lock, we can give an
			 * error report including details about the detected deadlock.
			 */
			DeadLockReport();
			/* not reached */
		}
	}
	PG_CATCH();
	{
		/* In this path, awaitedLock remains set until LockErrorCleanup */

		/* reset ps display to remove the suffix */
		set_ps_display_remove_suffix();

		/* and propagate the error */
		PG_RE_THROW();
	}
	PG_END_TRY();

	/* Normal exit: we were granted the lock. */
	awaitedLock = NULL;

	/* reset ps display to remove the suffix */
	set_ps_display_remove_suffix();

	LOCK_PRINT("WaitOnLock: wakeup on lock",
			   locallock->lock, locallock->tag.mode);
}
1874 : :
1875 : : /*
1876 : : * Remove a proc from the wait-queue it is on (caller must know it is on one).
1877 : : * This is only used when the proc has failed to get the lock, so we set its
1878 : : * waitStatus to PROC_WAIT_STATUS_ERROR.
1879 : : *
1880 : : * Appropriate partition lock must be held by caller. Also, caller is
1881 : : * responsible for signaling the proc if needed.
1882 : : *
1883 : : * NB: this does not clean up any locallock object that may exist for the lock.
1884 : : */
1885 : : void
6475 tgl@sss.pgh.pa.us 1886 : 42 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1887 : : {
8424 bruce@momjian.us 1888 : 42 : LOCK *waitLock = proc->waitLock;
6984 tgl@sss.pgh.pa.us 1889 : 42 : PROCLOCK *proclock = proc->waitProcLock;
8424 bruce@momjian.us 1890 : 42 : LOCKMODE lockmode = proc->waitLockMode;
6984 tgl@sss.pgh.pa.us 1891 : 42 : LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1892 : :
1893 : : /* Make sure proc is waiting */
1397 peter@eisentraut.org 1894 [ - + ]: 42 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
5642 tgl@sss.pgh.pa.us 1895 [ - + ]: 42 : Assert(proc->links.next != NULL);
8480 1896 [ - + ]: 42 : Assert(waitLock);
452 andres@anarazel.de 1897 [ - + ]: 42 : Assert(!dclist_is_empty(&waitLock->waitProcs));
6701 tgl@sss.pgh.pa.us 1898 [ + - - + ]: 42 : Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1899 : :
1900 : : /* Remove proc from lock's wait queue */
263 msawada@postgresql.o 1901 : 42 : dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
1902 : :
1903 : : /* Undo increments of request counts by waiting process */
8480 tgl@sss.pgh.pa.us 1904 [ - + ]: 42 : Assert(waitLock->nRequested > 0);
1905 [ - + ]: 42 : Assert(waitLock->nRequested > proc->waitLock->nGranted);
1906 : 42 : waitLock->nRequested--;
1907 [ - + ]: 42 : Assert(waitLock->requested[lockmode] > 0);
1908 : 42 : waitLock->requested[lockmode]--;
1909 : : /* don't forget to clear waitMask bit if appropriate */
1910 [ + + ]: 42 : if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
7440 bruce@momjian.us 1911 : 41 : waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1912 : :
1913 : : /* Clean up the proc's own state, and pass it the ok/fail signal */
8480 tgl@sss.pgh.pa.us 1914 : 42 : proc->waitLock = NULL;
7170 1915 : 42 : proc->waitProcLock = NULL;
1397 peter@eisentraut.org 1916 : 42 : proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1917 : :
1918 : : /*
1919 : : * Delete the proclock immediately if it represents no already-held locks.
1920 : : * (This must happen now because if the owner of the lock decides to
1921 : : * release it, and the requested/granted counts then go to zero,
1922 : : * LockRelease expects there to be no remaining proclocks.) Then see if
1923 : : * any other waiters for the lock can be woken up now.
1924 : : */
6699 tgl@sss.pgh.pa.us 1925 : 42 : CleanUpLock(waitLock, proclock,
6475 1926 : 42 : LockMethods[lockmethodid], hashcode,
1927 : : true);
8480 1928 : 42 : }
1929 : :
/*
 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
 *		Release a session lock if 'sessionLock' is true, else release a
 *		regular transaction lock.
 *
 * Side Effects: find any waiting processes that are now wakable,
 *		grant them their requested locks and awaken them.
 *		(We have to grant the lock here to avoid a race between
 *		the waking process and any new process to
 *		come along and request the lock.)
 *
 * Returns true on success; false (with a WARNING) if the caller does not in
 * fact hold such a lock.
 */
bool
LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
{
	LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
	LockMethod	lockMethodTable;
	LOCALLOCKTAG localtag;
	LOCALLOCK  *locallock;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	LWLock	   *partitionLock;
	bool		wakeupNeeded;

	/* Validate the lock method and mode before touching any state. */
	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];
	if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
		elog(ERROR, "unrecognized lock mode: %d", lockmode);

#ifdef LOCK_DEBUG
	if (LOCK_DEBUG_ENABLED(locktag))
		elog(LOG, "LockRelease: lock [%u,%u] %s",
			 locktag->locktag_field1, locktag->locktag_field2,
			 lockMethodTable->lockModeNames[lockmode]);
#endif

	/*
	 * Find the LOCALLOCK entry for this lock and lockmode
	 */
	MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
	localtag.lock = *locktag;
	localtag.mode = lockmode;

	locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
										  &localtag,
										  HASH_FIND, NULL);

	/*
	 * let the caller print its own error message, too. Do not ereport(ERROR).
	 */
	if (!locallock || locallock->nLocks <= 0)
	{
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		return false;
	}

	/*
	 * Decrease the count for the resource owner.
	 */
	{
		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
		ResourceOwner owner;
		int			i;

		/* Identify owner for lock: NULL stands for a session lock. */
		if (sessionLock)
			owner = NULL;
		else
			owner = CurrentResourceOwner;

		/* Scan backwards: the most recent owner entry is likely at the end. */
		for (i = locallock->numLockOwners - 1; i >= 0; i--)
		{
			if (lockOwners[i].owner == owner)
			{
				Assert(lockOwners[i].nLocks > 0);
				if (--lockOwners[i].nLocks == 0)
				{
					if (owner != NULL)
						ResourceOwnerForgetLock(owner, locallock);
					/* compact out unused slot */
					locallock->numLockOwners--;
					if (i < locallock->numLockOwners)
						lockOwners[i] = lockOwners[locallock->numLockOwners];
				}
				break;
			}
		}
		if (i < 0)
		{
			/* don't release a lock belonging to another owner */
			elog(WARNING, "you don't own a lock of type %s",
				 lockMethodTable->lockModeNames[lockmode]);
			return false;
		}
	}

	/*
	 * Decrease the total local count.  If we're still holding the lock, we're
	 * done.
	 */
	locallock->nLocks--;

	if (locallock->nLocks > 0)
		return true;

	/*
	 * At this point we can no longer suppose we are clear of invalidation
	 * messages related to this lock.  Although we'll delete the LOCALLOCK
	 * object before any intentional return from this routine, it seems worth
	 * the trouble to explicitly reset lockCleared right now, just in case
	 * some error prevents us from deleting the LOCALLOCK.
	 */
	locallock->lockCleared = false;

	/* Attempt fast release of any lock eligible for the fast path. */
	if (EligibleForRelationFastPath(locktag, lockmode) &&
		FastPathLocalUseCount > 0)
	{
		bool		released;

		/*
		 * We might not find the lock here, even if we originally entered it
		 * here.  Another backend may have moved it to the main table.
		 */
		LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
		released = FastPathUnGrantRelationLock(locktag->locktag_field2,
											   lockmode);
		LWLockRelease(&MyProc->fpInfoLock);
		if (released)
		{
			RemoveLocalLock(locallock);
			return true;
		}
	}

	/*
	 * Otherwise we've got to mess with the shared lock table.
	 */
	partitionLock = LockHashPartitionLock(locallock->hashcode);

	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/*
	 * Normally, we don't need to re-find the lock or proclock, since we kept
	 * their addresses in the locallock table, and they couldn't have been
	 * removed while we were holding a lock on them.  But it's possible that
	 * the lock was taken fast-path and has since been moved to the main hash
	 * table by another backend, in which case we will need to look up the
	 * objects here.  We assume the lock field is NULL if so.
	 */
	lock = locallock->lock;
	if (!lock)
	{
		PROCLOCKTAG proclocktag;

		Assert(EligibleForRelationFastPath(locktag, lockmode));
		lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
													locktag,
													locallock->hashcode,
													HASH_FIND,
													NULL);
		if (!lock)
			elog(ERROR, "failed to re-find shared lock object");
		locallock->lock = lock;

		/* The proclock is keyed by (lock object, owning proc). */
		proclocktag.myLock = lock;
		proclocktag.myProc = MyProc;
		locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
													   &proclocktag,
													   HASH_FIND,
													   NULL);
		if (!locallock->proclock)
			elog(ERROR, "failed to re-find shared proclock object");
	}
	LOCK_PRINT("LockRelease: found", lock, lockmode);
	proclock = locallock->proclock;
	PROCLOCK_PRINT("LockRelease: found", proclock);

	/*
	 * Double-check that we are actually holding a lock of the type we want to
	 * release.
	 */
	if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
	{
		PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
		LWLockRelease(partitionLock);
		elog(WARNING, "you don't own a lock of type %s",
			 lockMethodTable->lockModeNames[lockmode]);
		RemoveLocalLock(locallock);
		return false;
	}

	/*
	 * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
	 */
	wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);

	CleanUpLock(lock, proclock,
				lockMethodTable, locallock->hashcode,
				wakeupNeeded);

	LWLockRelease(partitionLock);

	RemoveLocalLock(locallock);
	return true;
}
2137 : :
2138 : : /*
2139 : : * LockReleaseAll -- Release all locks of the specified lock method that
2140 : : * are held by the current process.
2141 : : *
2142 : : * Well, not necessarily *all* locks. The available behaviors are:
2143 : : * allLocks == true: release all locks including session locks.
2144 : : * allLocks == false: release all non-session locks.
2145 : : */
2146 : : void
6879 tgl@sss.pgh.pa.us 2147 : 882058 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2148 : : {
2149 : : HASH_SEQ_STATUS status;
2150 : : LockMethod lockMethodTable;
2151 : : int i,
2152 : : numLockModes;
2153 : : LOCALLOCK *locallock;
2154 : : LOCK *lock;
2155 : : int partition;
4705 rhaas@postgresql.org 2156 : 882058 : bool have_fast_path_lwlock = false;
2157 : :
6701 tgl@sss.pgh.pa.us 2158 [ + - - + ]: 882058 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
6701 tgl@sss.pgh.pa.us 2159 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
6701 tgl@sss.pgh.pa.us 2160 :CBC 882058 : lockMethodTable = LockMethods[lockmethodid];
2161 : :
2162 : : #ifdef LOCK_DEBUG
2163 : : if (*(lockMethodTable->trace_flag))
2164 : : elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2165 : : #endif
2166 : :
2167 : : /*
2168 : : * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2169 : : * the only way that the lock we hold on our own VXID can ever get
2170 : : * released: it is always and only released when a toplevel transaction
2171 : : * ends.
2172 : : */
4637 rhaas@postgresql.org 2173 [ + + ]: 882058 : if (lockmethodid == DEFAULT_LOCKMETHOD)
2174 : 433353 : VirtualXactLockTableCleanup();
2175 : :
7941 bruce@momjian.us 2176 : 882058 : numLockModes = lockMethodTable->numLockModes;
2177 : :
2178 : : /*
2179 : : * First we run through the locallock table and get rid of unwanted
2180 : : * entries, then we scan the process's proclocks and get rid of those. We
2181 : : * do this separately because we may have multiple locallock entries
2182 : : * pointing to the same proclock, and we daren't end up with any dangling
2183 : : * pointers. Fast-path locks are cleaned up during the locallock table
2184 : : * scan, though.
2185 : : */
6701 tgl@sss.pgh.pa.us 2186 : 882058 : hash_seq_init(&status, LockMethodLocalHash);
2187 : :
7170 2188 [ + + ]: 2029675 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2189 : : {
2190 : : /*
2191 : : * If the LOCALLOCK entry is unused, we must've run out of shared
2192 : : * memory while trying to set up this lock. Just forget the local
2193 : : * entry.
2194 : : */
4337 rhaas@postgresql.org 2195 [ + + ]: 1147617 : if (locallock->nLocks == 0)
2196 : : {
2197 : 43 : RemoveLocalLock(locallock);
2198 : 43 : continue;
2199 : : }
2200 : :
2201 : : /* Ignore items that are not of the lockmethod to be removed */
2202 [ + + ]: 1147574 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2203 : 96317 : continue;
2204 : :
2205 : : /*
2206 : : * If we are asked to release all locks, we can just zap the entry.
2207 : : * Otherwise, must scan to see if there are session locks. We assume
2208 : : * there is at most one lockOwners entry for session locks.
2209 : : */
2210 [ + + ]: 1051257 : if (!allLocks)
2211 : : {
2212 : 983801 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2213 : :
2214 : : /* If session lock is above array position 0, move it down to 0 */
3973 bruce@momjian.us 2215 [ + + ]: 2055387 : for (i = 0; i < locallock->numLockOwners; i++)
2216 : : {
4337 rhaas@postgresql.org 2217 [ + + ]: 1071586 : if (lockOwners[i].owner == NULL)
2218 : 96234 : lockOwners[0] = lockOwners[i];
2219 : : else
4315 heikki.linnakangas@i 2220 : 975352 : ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2221 : : }
2222 : :
4337 rhaas@postgresql.org 2223 [ + - ]: 983801 : if (locallock->numLockOwners > 0 &&
2224 [ + + ]: 983801 : lockOwners[0].owner == NULL &&
2225 [ + - ]: 96234 : lockOwners[0].nLocks > 0)
2226 : : {
2227 : : /* Fix the locallock to show just the session locks */
2228 : 96234 : locallock->nLocks = lockOwners[0].nLocks;
2229 : 96234 : locallock->numLockOwners = 1;
2230 : : /* We aren't deleting this locallock, so done */
4705 2231 : 96234 : continue;
2232 : : }
2233 : : else
4315 heikki.linnakangas@i 2234 : 887567 : locallock->numLockOwners = 0;
2235 : : }
2236 : :
2237 : : /*
2238 : : * If the lock or proclock pointers are NULL, this lock was taken via
2239 : : * the relation fast-path (and is not known to have been transferred).
2240 : : */
4337 rhaas@postgresql.org 2241 [ + + - + ]: 955023 : if (locallock->proclock == NULL || locallock->lock == NULL)
2242 : 982 : {
2243 : 493275 : LOCKMODE lockmode = locallock->tag.mode;
2244 : : Oid relid;
2245 : :
2246 : : /* Verify that a fast-path lock is what we've got. */
2247 [ + - + - : 493275 : if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
+ - + - -
+ ]
4705 rhaas@postgresql.org 2248 [ # # ]:UBC 0 : elog(PANIC, "locallock table corrupted");
2249 : :
2250 : : /*
2251 : : * If we don't currently hold the LWLock that protects our
2252 : : * fast-path data structures, we must acquire it before attempting
2253 : : * to release the lock via the fast-path. We will continue to
2254 : : * hold the LWLock until we're done scanning the locallock table,
2255 : : * unless we hit a transferred fast-path lock. (XXX is this
2256 : : * really such a good idea? There could be a lot of entries ...)
2257 : : */
4705 rhaas@postgresql.org 2258 [ + + ]:CBC 493275 : if (!have_fast_path_lwlock)
2259 : : {
1430 tgl@sss.pgh.pa.us 2260 : 180445 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4705 rhaas@postgresql.org 2261 : 180445 : have_fast_path_lwlock = true;
2262 : : }
2263 : :
2264 : : /* Attempt fast-path release. */
2265 : 493275 : relid = locallock->tag.lock.locktag_field2;
4653 2266 [ + + ]: 493275 : if (FastPathUnGrantRelationLock(relid, lockmode))
2267 : : {
4705 2268 : 492293 : RemoveLocalLock(locallock);
2269 : 492293 : continue;
2270 : : }
2271 : :
2272 : : /*
2273 : : * Our lock, originally taken via the fast path, has been
2274 : : * transferred to the main lock table. That's going to require
2275 : : * some extra work, so release our fast-path lock before starting.
2276 : : */
1430 tgl@sss.pgh.pa.us 2277 : 982 : LWLockRelease(&MyProc->fpInfoLock);
4705 rhaas@postgresql.org 2278 : 982 : have_fast_path_lwlock = false;
2279 : :
2280 : : /*
2281 : : * Now dump the lock. We haven't got a pointer to the LOCK or
2282 : : * PROCLOCK in this case, so we have to handle this a bit
2283 : : * differently than a normal lock release. Unfortunately, this
2284 : : * requires an extra LWLock acquire-and-release cycle on the
2285 : : * partitionLock, but hopefully it shouldn't happen often.
2286 : : */
2287 : 982 : LockRefindAndRelease(lockMethodTable, MyProc,
2288 : : &locallock->tag.lock, lockmode, false);
7170 tgl@sss.pgh.pa.us 2289 : 982 : RemoveLocalLock(locallock);
2290 : 982 : continue;
2291 : : }
2292 : :
2293 : : /* Mark the proclock to show we need to release this lockmode */
6879 2294 [ + - ]: 461748 : if (locallock->nLocks > 0)
2295 : 461748 : locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2296 : :
2297 : : /* And remove the locallock hashtable entry */
7170 2298 : 461748 : RemoveLocalLock(locallock);
2299 : : }
2300 : :
2301 : : /* Done with the fast-path data structures */
4705 rhaas@postgresql.org 2302 [ + + ]: 882058 : if (have_fast_path_lwlock)
1430 tgl@sss.pgh.pa.us 2303 : 179463 : LWLockRelease(&MyProc->fpInfoLock);
2304 : :
2305 : : /*
2306 : : * Now, scan each lock partition separately.
2307 : : */
6699 2308 [ + + ]: 14994986 : for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2309 : : {
2310 : : LWLock *partitionLock;
452 andres@anarazel.de 2311 : 14112928 : dlist_head *procLocks = &MyProc->myProcLocks[partition];
2312 : : dlist_mutable_iter proclock_iter;
2313 : :
3730 rhaas@postgresql.org 2314 : 14112928 : partitionLock = LockHashPartitionLockByIndex(partition);
2315 : :
2316 : : /*
2317 : : * If the proclock list for this partition is empty, we can skip
2318 : : * acquiring the partition lock. This optimization is trickier than
2319 : : * it looks, because another backend could be in process of adding
2320 : : * something to our proclock list due to promoting one of our
2321 : : * fast-path locks. However, any such lock must be one that we
2322 : : * decided not to delete above, so it's okay to skip it again now;
2323 : : * we'd just decide not to delete it again. We must, however, be
2324 : : * careful to re-fetch the list header once we've acquired the
2325 : : * partition lock, to be sure we have a valid, up-to-date pointer.
2326 : : * (There is probably no significant risk if pointer fetch/store is
2327 : : * atomic, but we don't wish to assume that.)
2328 : : *
2329 : : * XXX This argument assumes that the locallock table correctly
2330 : : * represents all of our fast-path locks. While allLocks mode
2331 : : * guarantees to clean up all of our normal locks regardless of the
2332 : : * locallock situation, we lose that guarantee for fast-path locks.
2333 : : * This is not ideal.
2334 : : */
452 andres@anarazel.de 2335 [ + + ]: 14112928 : if (dlist_is_empty(procLocks))
6699 tgl@sss.pgh.pa.us 2336 : 13580698 : continue; /* needn't examine this partition */
2337 : :
2338 : 532230 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2339 : :
452 andres@anarazel.de 2340 [ + - + + ]: 1163373 : dlist_foreach_modify(proclock_iter, procLocks)
2341 : : {
2342 : 631143 : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
6699 tgl@sss.pgh.pa.us 2343 : 631143 : bool wakeupNeeded = false;
2344 : :
6475 2345 [ - + ]: 631143 : Assert(proclock->tag.myProc == MyProc);
2346 : :
2347 : 631143 : lock = proclock->tag.myLock;
2348 : :
2349 : : /* Ignore items that are not of the lockmethod to be removed */
6699 2350 [ + + ]: 631143 : if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
3790 2351 : 96315 : continue;
2352 : :
2353 : : /*
2354 : : * In allLocks mode, force release of all locks even if locallock
2355 : : * table had problems
2356 : : */
6699 2357 [ + + ]: 534828 : if (allLocks)
2358 : 44049 : proclock->releaseMask = proclock->holdMask;
2359 : : else
2360 [ - + ]: 490779 : Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2361 : :
2362 : : /*
2363 : : * Ignore items that have nothing to be released, unless they have
2364 : : * holdMask == 0 and are therefore recyclable
2365 : : */
2366 [ + + + - ]: 534828 : if (proclock->releaseMask == 0 && proclock->holdMask != 0)
3790 2367 : 95594 : continue;
2368 : :
2369 : : PROCLOCK_PRINT("LockReleaseAll", proclock);
2370 : : LOCK_PRINT("LockReleaseAll", lock, 0);
6699 2371 [ - + ]: 439234 : Assert(lock->nRequested >= 0);
2372 [ - + ]: 439234 : Assert(lock->nGranted >= 0);
2373 [ - + ]: 439234 : Assert(lock->nGranted <= lock->nRequested);
2374 [ - + ]: 439234 : Assert((proclock->holdMask & ~lock->grantMask) == 0);
2375 : :
2376 : : /*
2377 : : * Release the previously-marked lock modes
2378 : : */
2379 [ + + ]: 3953106 : for (i = 1; i <= numLockModes; i++)
2380 : : {
2381 [ + + ]: 3513872 : if (proclock->releaseMask & LOCKBIT_ON(i))
2382 : 461749 : wakeupNeeded |= UnGrantLock(lock, i, proclock,
2383 : : lockMethodTable);
2384 : : }
2385 [ + - - + ]: 439234 : Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2386 [ - + ]: 439234 : Assert(lock->nGranted <= lock->nRequested);
2387 : : LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2388 : :
2389 : 439234 : proclock->releaseMask = 0;
2390 : :
2391 : : /* CleanUpLock will wake up waiters if needed. */
2392 : 439234 : CleanUpLock(lock, proclock,
2393 : : lockMethodTable,
6475 2394 : 439234 : LockTagHashCode(&lock->tag),
2395 : : wakeupNeeded);
2396 : : } /* loop over PROCLOCKs within this partition */
2397 : :
6699 2398 : 532230 : LWLockRelease(partitionLock);
2399 : : } /* loop over partitions */
2400 : :
2401 : : #ifdef LOCK_DEBUG
2402 : : if (*(lockMethodTable->trace_flag))
2403 : : elog(LOG, "LockReleaseAll done");
2404 : : #endif
10141 scrappy@hub.org 2405 : 882058 : }
2406 : :
2407 : : /*
2408 : : * LockReleaseSession -- Release all session locks of the specified lock method
2409 : : * that are held by the current process.
2410 : : */
2411 : : void
4363 tgl@sss.pgh.pa.us 2412 : 119 : LockReleaseSession(LOCKMETHODID lockmethodid)
2413 : : {
2414 : : HASH_SEQ_STATUS status;
2415 : : LOCALLOCK *locallock;
2416 : :
2417 [ + - - + ]: 119 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4363 tgl@sss.pgh.pa.us 2418 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2419 : :
4363 tgl@sss.pgh.pa.us 2420 :CBC 119 : hash_seq_init(&status, LockMethodLocalHash);
2421 : :
2422 [ + + ]: 226 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2423 : : {
2424 : : /* Ignore items that are not of the specified lock method */
2425 [ + + ]: 107 : if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2426 : 10 : continue;
2427 : :
2428 : 97 : ReleaseLockIfHeld(locallock, true);
2429 : : }
2430 : 119 : }
2431 : :
2432 : : /*
2433 : : * LockReleaseCurrentOwner
2434 : : * Release all locks belonging to CurrentResourceOwner
2435 : : *
2436 : : * If the caller knows what those locks are, it can pass them as an array.
2437 : : * That speeds up the call significantly, when a lot of locks are held.
2438 : : * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2439 : : * table to find them.
2440 : : */
2441 : : void
4315 heikki.linnakangas@i 2442 : 4698 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2443 : : {
2444 [ + + ]: 4698 : if (locallocks == NULL)
2445 : : {
2446 : : HASH_SEQ_STATUS status;
2447 : : LOCALLOCK *locallock;
2448 : :
2449 : 4 : hash_seq_init(&status, LockMethodLocalHash);
2450 : :
2451 [ + + ]: 269 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2452 : 265 : ReleaseLockIfHeld(locallock, false);
2453 : : }
2454 : : else
2455 : : {
2456 : : int i;
2457 : :
2458 [ + + ]: 7199 : for (i = nlocks - 1; i >= 0; i--)
2459 : 2505 : ReleaseLockIfHeld(locallocks[i], false);
2460 : : }
4804 itagaki.takahiro@gma 2461 : 4698 : }
2462 : :
2463 : : /*
2464 : : * ReleaseLockIfHeld
2465 : : * Release any session-level locks on this lockable object if sessionLock
2466 : : * is true; else, release any locks held by CurrentResourceOwner.
2467 : : *
2468 : : * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2469 : : * locks), but without refactoring LockRelease() we cannot support releasing
2470 : : * locks belonging to resource owners other than CurrentResourceOwner.
2471 : : * If we were to refactor, it'd be a good idea to fix it so we don't have to
2472 : : * do a hashtable lookup of the locallock, too. However, currently this
2473 : : * function isn't used heavily enough to justify refactoring for its
2474 : : * convenience.
2475 : : */
2476 : : static void
4363 tgl@sss.pgh.pa.us 2477 : 2867 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2478 : : {
2479 : : ResourceOwner owner;
2480 : : LOCALLOCKOWNER *lockOwners;
2481 : : int i;
2482 : :
2483 : : /* Identify owner for lock (must match LockRelease!) */
2484 [ + + ]: 2867 : if (sessionLock)
2485 : 97 : owner = NULL;
2486 : : else
2487 : 2770 : owner = CurrentResourceOwner;
2488 : :
2489 : : /* Scan to see if there are any locks belonging to the target owner */
4804 itagaki.takahiro@gma 2490 : 2867 : lockOwners = locallock->lockOwners;
2491 [ + + ]: 3060 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2492 : : {
2493 [ + + ]: 2867 : if (lockOwners[i].owner == owner)
2494 : : {
2495 [ - + ]: 2674 : Assert(lockOwners[i].nLocks > 0);
2496 [ + + ]: 2674 : if (lockOwners[i].nLocks < locallock->nLocks)
2497 : : {
2498 : : /*
2499 : : * We will still hold this lock after forgetting this
2500 : : * ResourceOwner.
2501 : : */
2502 : 674 : locallock->nLocks -= lockOwners[i].nLocks;
2503 : : /* compact out unused slot */
2504 : 674 : locallock->numLockOwners--;
4315 heikki.linnakangas@i 2505 [ + - ]: 674 : if (owner != NULL)
2506 : 674 : ResourceOwnerForgetLock(owner, locallock);
4804 itagaki.takahiro@gma 2507 [ - + ]: 674 : if (i < locallock->numLockOwners)
4804 itagaki.takahiro@gma 2508 :UBC 0 : lockOwners[i] = lockOwners[locallock->numLockOwners];
2509 : : }
2510 : : else
2511 : : {
4804 itagaki.takahiro@gma 2512 [ - + ]:CBC 2000 : Assert(lockOwners[i].nLocks == locallock->nLocks);
2513 : : /* We want to call LockRelease just once */
2514 : 2000 : lockOwners[i].nLocks = 1;
2515 : 2000 : locallock->nLocks = 1;
2516 [ - + ]: 2000 : if (!LockRelease(&locallock->tag.lock,
2517 : : locallock->tag.mode,
2518 : : sessionLock))
4363 tgl@sss.pgh.pa.us 2519 [ # # ]:UBC 0 : elog(WARNING, "ReleaseLockIfHeld: failed??");
2520 : : }
4804 itagaki.takahiro@gma 2521 :CBC 2674 : break;
2522 : : }
2523 : : }
7170 tgl@sss.pgh.pa.us 2524 : 2867 : }
2525 : :
2526 : : /*
2527 : : * LockReassignCurrentOwner
2528 : : * Reassign all locks belonging to CurrentResourceOwner to belong
2529 : : * to its parent resource owner.
2530 : : *
2531 : : * If the caller knows what those locks are, it can pass them as an array.
2532 : : * That speeds up the call significantly, when a lot of locks are held
2533 : : * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2534 : : * and we'll traverse through our hash table to find them.
2535 : : */
2536 : : void
4315 heikki.linnakangas@i 2537 : 306838 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2538 : : {
7170 tgl@sss.pgh.pa.us 2539 : 306838 : ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2540 : :
2541 [ - + ]: 306838 : Assert(parent != NULL);
2542 : :
4315 heikki.linnakangas@i 2543 [ + + ]: 306838 : if (locallocks == NULL)
2544 : : {
2545 : : HASH_SEQ_STATUS status;
2546 : : LOCALLOCK *locallock;
2547 : :
2548 : 3078 : hash_seq_init(&status, LockMethodLocalHash);
2549 : :
2550 [ + + ]: 88840 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2551 : 85762 : LockReassignOwner(locallock, parent);
2552 : : }
2553 : : else
2554 : : {
2555 : : int i;
2556 : :
2557 [ + + ]: 636462 : for (i = nlocks - 1; i >= 0; i--)
2558 : 332702 : LockReassignOwner(locallocks[i], parent);
2559 : : }
2560 : 306838 : }
2561 : :
2562 : : /*
2563 : : * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2564 : : * CurrentResourceOwner to its parent.
2565 : : */
2566 : : static void
2567 : 418464 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2568 : : {
2569 : : LOCALLOCKOWNER *lockOwners;
2570 : : int i;
2571 : 418464 : int ic = -1;
2572 : 418464 : int ip = -1;
2573 : :
2574 : : /*
2575 : : * Scan to see if there are any locks belonging to current owner or its
2576 : : * parent
2577 : : */
2578 : 418464 : lockOwners = locallock->lockOwners;
2579 [ + + ]: 981563 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
2580 : : {
2581 [ + + ]: 563099 : if (lockOwners[i].owner == CurrentResourceOwner)
2582 : 401258 : ic = i;
2583 [ + + ]: 161841 : else if (lockOwners[i].owner == parent)
2584 : 120003 : ip = i;
2585 : : }
2586 : :
2587 [ + + ]: 418464 : if (ic < 0)
3973 bruce@momjian.us 2588 : 17206 : return; /* no current locks */
2589 : :
4315 heikki.linnakangas@i 2590 [ + + ]: 401258 : if (ip < 0)
2591 : : {
2592 : : /* Parent has no slot, so just give it the child's slot */
2593 : 298430 : lockOwners[ic].owner = parent;
2594 : 298430 : ResourceOwnerRememberLock(parent, locallock);
2595 : : }
2596 : : else
2597 : : {
2598 : : /* Merge child's count with parent's */
2599 : 102828 : lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2600 : : /* compact out unused slot */
2601 : 102828 : locallock->numLockOwners--;
2602 [ + + ]: 102828 : if (ic < locallock->numLockOwners)
2603 : 666 : lockOwners[ic] = lockOwners[locallock->numLockOwners];
2604 : : }
2605 : 401258 : ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2606 : : }
2607 : :
2608 : : /*
2609 : : * FastPathGrantRelationLock
2610 : : * Grant lock using per-backend fast-path array, if there is space.
2611 : : */
2612 : : static bool
4653 rhaas@postgresql.org 2613 : 13445546 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2614 : : {
2615 : : uint32 f;
4705 2616 : 13445546 : uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2617 : :
2618 : : /* Scan for existing entry for this relid, remembering empty slot. */
2619 [ + + ]: 227700969 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2620 : : {
2621 [ + + ]: 214632105 : if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2622 : 188541540 : unused_slot = f;
2623 [ + + ]: 26090565 : else if (MyProc->fpRelId[f] == relid)
2624 : : {
2625 [ - + - + : 376682 : Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
- + - + ]
2626 [ - + - + : 376682 : FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
- + ]
2627 : 376682 : return true;
2628 : : }
2629 : : }
2630 : :
2631 : : /* If no existing entry, use any empty slot. */
2632 [ + - ]: 13068864 : if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2633 : : {
2634 : 13068864 : MyProc->fpRelId[unused_slot] = relid;
2635 [ - + - + : 13068864 : FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
- + ]
2636 : 13068864 : ++FastPathLocalUseCount;
2637 : 13068864 : return true;
2638 : : }
2639 : :
2640 : : /* No existing entry, and no empty slot. */
4705 rhaas@postgresql.org 2641 :UBC 0 : return false;
2642 : : }
2643 : :
2644 : : /*
2645 : : * FastPathUnGrantRelationLock
2646 : : * Release fast-path lock, if present. Update backend-private local
2647 : : * use count, while we're at it.
2648 : : */
2649 : : static bool
4653 rhaas@postgresql.org 2650 :CBC 13848350 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2651 : : {
2652 : : uint32 f;
4705 2653 : 13848350 : bool result = false;
2654 : :
2655 : 13848350 : FastPathLocalUseCount = 0;
2656 [ + + ]: 235421950 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2657 : : {
2658 [ + + ]: 221573600 : if (MyProc->fpRelId[f] == relid
2659 [ - + - + : 16945018 : && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
- + + + ]
2660 : : {
2661 [ - + ]: 13444140 : Assert(!result);
2662 [ - + - + : 13444140 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
- + ]
2663 : 13444140 : result = true;
2664 : : /* we continue iterating so as to update FastPathLocalUseCount */
2665 : : }
2666 [ + + ]: 221573600 : if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2667 : 31457550 : ++FastPathLocalUseCount;
2668 : : }
2669 : 13848350 : return result;
2670 : : }
2671 : :
2672 : : /*
2673 : : * FastPathTransferRelationLocks
2674 : : * Transfer locks matching the given lock tag from per-backend fast-path
2675 : : * arrays to the shared hash table.
2676 : : *
2677 : : * Returns true if successful, false if ran out of shared memory.
2678 : : */
2679 : : static bool
4653 2680 : 156365 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2681 : : uint32 hashcode)
2682 : : {
3730 2683 : 156365 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
4326 bruce@momjian.us 2684 : 156365 : Oid relid = locktag->locktag_field2;
2685 : : uint32 i;
2686 : :
2687 : : /*
2688 : : * Every PGPROC that can potentially hold a fast-path lock is present in
2689 : : * ProcGlobal->allProcs. Prepared transactions are not, but any
2690 : : * outstanding fast-path locks held by prepared transactions are
2691 : : * transferred to the main lock table.
2692 : : */
4705 rhaas@postgresql.org 2693 [ + + ]: 16043735 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2694 : : {
2695 : 15887370 : PGPROC *proc = &ProcGlobal->allProcs[i];
2696 : : uint32 f;
2697 : :
1430 tgl@sss.pgh.pa.us 2698 : 15887370 : LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2699 : :
2700 : : /*
2701 : : * If the target backend isn't referencing the same database as the
2702 : : * lock, then we needn't examine the individual relation IDs at all;
2703 : : * none of them can be relevant.
2704 : : *
2705 : : * proc->databaseId is set at backend startup time and never changes
2706 : : * thereafter, so it might be safe to perform this test before
2707 : : * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2708 : : * assume that if the target backend holds any fast-path locks, it
2709 : : * must have performed a memory-fencing operation (in particular, an
2710 : : * LWLock acquisition) since setting proc->databaseId. However, it's
2711 : : * less clear that our backend is certain to have performed a memory
2712 : : * fencing operation since the other backend set proc->databaseId. So
2713 : : * for now, we test it after acquiring the LWLock just to be safe.
2714 : : */
4104 rhaas@postgresql.org 2715 [ + + ]: 15887370 : if (proc->databaseId != locktag->locktag_field1)
2716 : : {
1430 tgl@sss.pgh.pa.us 2717 : 7496684 : LWLockRelease(&proc->fpInfoLock);
4705 rhaas@postgresql.org 2718 : 7496684 : continue;
2719 : : }
2720 : :
2721 [ + + ]: 142640450 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2722 : : {
2723 : : uint32 lockmode;
2724 : :
2725 : : /* Look for an allocated slot matching the given relid. */
2726 [ + + + + ]: 134250707 : if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2727 : 134249764 : continue;
2728 : :
2729 : : /* Find or create lock object. */
2730 : 943 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2731 : 943 : for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2489 tgl@sss.pgh.pa.us 2732 [ + + ]: 3772 : lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
4705 rhaas@postgresql.org 2733 : 2829 : ++lockmode)
2734 : : {
2735 : : PROCLOCK *proclock;
2736 : :
2737 [ - + - + : 2829 : if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
- + + + ]
2738 : 1834 : continue;
2739 : 995 : proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2740 : : hashcode, lockmode);
2741 [ - + ]: 995 : if (!proclock)
2742 : : {
4705 rhaas@postgresql.org 2743 :UBC 0 : LWLockRelease(partitionLock);
1430 tgl@sss.pgh.pa.us 2744 : 0 : LWLockRelease(&proc->fpInfoLock);
4705 rhaas@postgresql.org 2745 : 0 : return false;
2746 : : }
4705 rhaas@postgresql.org 2747 :CBC 995 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2748 [ - + - + : 995 : FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
- + ]
2749 : : }
2750 : 943 : LWLockRelease(partitionLock);
2751 : :
2752 : : /* No need to examine remaining slots. */
3791 tgl@sss.pgh.pa.us 2753 : 943 : break;
2754 : : }
1430 2755 : 8390686 : LWLockRelease(&proc->fpInfoLock);
2756 : : }
4705 rhaas@postgresql.org 2757 : 156365 : return true;
2758 : : }
2759 : :
2760 : : /*
2761 : : * FastPathGetRelationLockEntry
2762 : : * Return the PROCLOCK for a lock originally taken via the fast-path,
2763 : : * transferring it to the primary lock table if necessary.
2764 : : *
2765 : : * Note: caller takes care of updating the locallock object.
2766 : : */
2767 : : static PROCLOCK *
4653 2768 : 408 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2769 : : {
4326 bruce@momjian.us 2770 : 408 : LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2771 : 408 : LOCKTAG *locktag = &locallock->tag.lock;
2772 : 408 : PROCLOCK *proclock = NULL;
3730 rhaas@postgresql.org 2773 : 408 : LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
4326 bruce@momjian.us 2774 : 408 : Oid relid = locktag->locktag_field2;
2775 : : uint32 f;
2776 : :
1430 tgl@sss.pgh.pa.us 2777 : 408 : LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2778 : :
4705 rhaas@postgresql.org 2779 [ + + ]: 6510 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2780 : : {
2781 : : uint32 lockmode;
2782 : :
2783 : : /* Look for an allocated slot matching the given relid. */
2784 [ + + - + ]: 6502 : if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2785 : 6102 : continue;
2786 : :
2787 : : /* If we don't have a lock of the given mode, forget it! */
2788 : 400 : lockmode = locallock->tag.mode;
2789 [ - + - + : 400 : if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
- + - + ]
4705 rhaas@postgresql.org 2790 :UBC 0 : break;
2791 : :
2792 : : /* Find or create lock object. */
4705 rhaas@postgresql.org 2793 :CBC 400 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2794 : :
2795 : 400 : proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2796 : : locallock->hashcode, lockmode);
2797 [ - + ]: 400 : if (!proclock)
2798 : : {
4114 tgl@sss.pgh.pa.us 2799 :UBC 0 : LWLockRelease(partitionLock);
1430 2800 : 0 : LWLockRelease(&MyProc->fpInfoLock);
4705 rhaas@postgresql.org 2801 [ # # ]: 0 : ereport(ERROR,
2802 : : (errcode(ERRCODE_OUT_OF_MEMORY),
2803 : : errmsg("out of shared memory"),
2804 : : errhint("You might need to increase %s.", "max_locks_per_transaction")));
2805 : : }
4705 rhaas@postgresql.org 2806 :CBC 400 : GrantLock(proclock->tag.myLock, proclock, lockmode);
2807 [ - + - + : 400 : FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
- + ]
2808 : :
2809 : 400 : LWLockRelease(partitionLock);
2810 : :
2811 : : /* No need to examine remaining slots. */
3791 tgl@sss.pgh.pa.us 2812 : 400 : break;
2813 : : }
2814 : :
1430 2815 : 408 : LWLockRelease(&MyProc->fpInfoLock);
2816 : :
2817 : : /* Lock may have already been transferred by some other backend. */
4705 rhaas@postgresql.org 2818 [ + + ]: 408 : if (proclock == NULL)
2819 : : {
2820 : : LOCK *lock;
2821 : : PROCLOCKTAG proclocktag;
2822 : : uint32 proclock_hashcode;
2823 : :
2824 : 8 : LWLockAcquire(partitionLock, LW_SHARED);
2825 : :
2826 : 8 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2827 : : locktag,
2828 : : locallock->hashcode,
2829 : : HASH_FIND,
2830 : : NULL);
2831 [ - + ]: 8 : if (!lock)
4705 rhaas@postgresql.org 2832 [ # # ]:UBC 0 : elog(ERROR, "failed to re-find shared lock object");
2833 : :
4705 rhaas@postgresql.org 2834 :CBC 8 : proclocktag.myLock = lock;
2835 : 8 : proclocktag.myProc = MyProc;
2836 : :
2837 : 8 : proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2838 : : proclock = (PROCLOCK *)
2839 : 8 : hash_search_with_hash_value(LockMethodProcLockHash,
2840 : : &proclocktag,
2841 : : proclock_hashcode,
2842 : : HASH_FIND,
2843 : : NULL);
2844 [ - + ]: 8 : if (!proclock)
4705 rhaas@postgresql.org 2845 [ # # ]:UBC 0 : elog(ERROR, "failed to re-find shared proclock object");
4705 rhaas@postgresql.org 2846 :CBC 8 : LWLockRelease(partitionLock);
2847 : : }
2848 : :
2849 : 408 : return proclock;
2850 : : }
2851 : :
2852 : : /*
2853 : : * GetLockConflicts
2854 : : * Get an array of VirtualTransactionIds of xacts currently holding locks
2855 : : * that would conflict with the specified lock/lockmode.
2856 : : * xacts merely awaiting such a lock are NOT reported.
2857 : : *
2858 : : * The result array is palloc'd and is terminated with an invalid VXID.
2859 : : * *countp, if not null, is updated to the number of items set.
2860 : : *
2861 : : * Of course, the result could be out of date by the time it's returned, so
2862 : : * use of this function has to be thought about carefully. Similarly, a
2863 : : * PGPROC with no "lxid" will be considered non-conflicting regardless of any
2864 : : * lock it holds. Existing callers don't care about a locker after that
2865 : : * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
2866 : : * pg_xact updates and before releasing locks.
2867 : : *
2868 : : * Note we never include the current xact's vxid in the result array,
2869 : : * since an xact never blocks itself.
2870 : : */
2871 : : VirtualTransactionId *
1839 alvherre@alvh.no-ip. 2872 : 1232 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
2873 : : {
2874 : : static VirtualTransactionId *vxids;
6440 tgl@sss.pgh.pa.us 2875 : 1232 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2876 : : LockMethod lockMethodTable;
2877 : : LOCK *lock;
2878 : : LOCKMASK conflictMask;
2879 : : dlist_iter proclock_iter;
2880 : : PROCLOCK *proclock;
2881 : : uint32 hashcode;
2882 : : LWLock *partitionLock;
6066 2883 : 1232 : int count = 0;
4705 rhaas@postgresql.org 2884 : 1232 : int fast_count = 0;
2885 : :
6440 tgl@sss.pgh.pa.us 2886 [ + - - + ]: 1232 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
6440 tgl@sss.pgh.pa.us 2887 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
6440 tgl@sss.pgh.pa.us 2888 :CBC 1232 : lockMethodTable = LockMethods[lockmethodid];
2889 [ + - - + ]: 1232 : if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
6440 tgl@sss.pgh.pa.us 2890 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock mode: %d", lockmode);
2891 : :
2892 : : /*
2893 : : * Allocate memory to store results, and fill with InvalidVXID. We only
2894 : : * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2895 : : * InHotStandby allocate once in TopMemoryContext.
2896 : : */
5189 simon@2ndQuadrant.co 2897 [ + + ]:CBC 1232 : if (InHotStandby)
2898 : : {
5190 simon@2ndQuadrant.co 2899 [ + + ]:GBC 4 : if (vxids == NULL)
2900 : 1 : vxids = (VirtualTransactionId *)
5189 2901 : 1 : MemoryContextAlloc(TopMemoryContext,
2902 : : sizeof(VirtualTransactionId) *
733 rhaas@postgresql.org 2903 : 1 : (MaxBackends + max_prepared_xacts + 1));
2904 : : }
2905 : : else
5189 simon@2ndQuadrant.co 2906 :CBC 1228 : vxids = (VirtualTransactionId *)
1170 noah@leadboat.com 2907 : 1228 : palloc0(sizeof(VirtualTransactionId) *
733 rhaas@postgresql.org 2908 : 1228 : (MaxBackends + max_prepared_xacts + 1));
2909 : :
2910 : : /* Compute hash code and partition lock, and look up conflicting modes. */
6440 tgl@sss.pgh.pa.us 2911 : 1232 : hashcode = LockTagHashCode(locktag);
2912 : 1232 : partitionLock = LockHashPartitionLock(hashcode);
4705 rhaas@postgresql.org 2913 : 1232 : conflictMask = lockMethodTable->conflictTab[lockmode];
2914 : :
2915 : : /*
2916 : : * Fast path locks might not have been entered in the primary lock table.
2917 : : * If the lock we're dealing with could conflict with such a lock, we must
2918 : : * examine each backend's fast-path array for conflicts.
2919 : : */
4337 2920 [ + - + - : 1232 : if (ConflictsWithRelationFastPath(locktag, lockmode))
+ - + - ]
2921 : : {
2922 : : int i;
4705 2923 : 1232 : Oid relid = locktag->locktag_field2;
2924 : : VirtualTransactionId vxid;
2925 : :
2926 : : /*
2927 : : * Iterate over relevant PGPROCs. Anything held by a prepared
2928 : : * transaction will have been transferred to the primary lock table,
2929 : : * so we need not worry about those. This is all a bit fuzzy, because
2930 : : * new locks could be taken after we've visited a particular
2931 : : * partition, but the callers had better be prepared to deal with that
2932 : : * anyway, since the locks could equally well be taken between the
2933 : : * time we return the value and the time the caller does something
2934 : : * with it.
2935 : : */
2936 [ + + ]: 139018 : for (i = 0; i < ProcGlobal->allProcCount; i++)
2937 : : {
2938 : 137786 : PGPROC *proc = &ProcGlobal->allProcs[i];
2939 : : uint32 f;
2940 : :
2941 : : /* A backend never blocks itself */
2942 [ + + ]: 137786 : if (proc == MyProc)
2943 : 1232 : continue;
2944 : :
1430 tgl@sss.pgh.pa.us 2945 : 136554 : LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2946 : :
2947 : : /*
2948 : : * If the target backend isn't referencing the same database as
2949 : : * the lock, then we needn't examine the individual relation IDs
2950 : : * at all; none of them can be relevant.
2951 : : *
2952 : : * See FastPathTransferRelationLocks() for discussion of why we do
2953 : : * this test after acquiring the lock.
2954 : : */
4104 rhaas@postgresql.org 2955 [ + + ]: 136554 : if (proc->databaseId != locktag->locktag_field1)
2956 : : {
1430 tgl@sss.pgh.pa.us 2957 : 57075 : LWLockRelease(&proc->fpInfoLock);
4705 rhaas@postgresql.org 2958 : 57075 : continue;
2959 : : }
2960 : :
2961 [ + + ]: 1350918 : for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2962 : : {
2963 : : uint32 lockmask;
2964 : :
2965 : : /* Look for an allocated slot matching the given relid. */
2966 [ + + ]: 1271616 : if (relid != proc->fpRelId[f])
2967 : 1270618 : continue;
2968 : 998 : lockmask = FAST_PATH_GET_BITS(proc, f);
2969 [ + + ]: 998 : if (!lockmask)
2970 : 821 : continue;
2971 : 177 : lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2972 : :
2973 : : /*
2974 : : * There can only be one entry per relation, so if we found it
2975 : : * and it doesn't conflict, we can skip the rest of the slots.
2976 : : */
2977 [ + + ]: 177 : if ((lockmask & conflictMask) == 0)
2978 : 5 : break;
2979 : :
2980 : : /* Conflict! */
2981 : 172 : GET_VXID_FROM_PGPROC(vxid, *proc);
2982 : :
2983 [ + - ]: 172 : if (VirtualTransactionIdIsValid(vxid))
2984 : 172 : vxids[count++] = vxid;
2985 : : /* else, xact already committed or aborted */
2986 : :
2987 : : /* No need to examine remaining slots. */
2988 : 172 : break;
2989 : : }
2990 : :
1430 tgl@sss.pgh.pa.us 2991 : 79479 : LWLockRelease(&proc->fpInfoLock);
2992 : : }
2993 : : }
2994 : :
2995 : : /* Remember how many fast-path conflicts we found. */
4705 rhaas@postgresql.org 2996 : 1232 : fast_count = count;
2997 : :
2998 : : /*
2999 : : * Look up the lock object matching the tag.
3000 : : */
6440 tgl@sss.pgh.pa.us 3001 : 1232 : LWLockAcquire(partitionLock, LW_SHARED);
3002 : :
3003 : 1232 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3004 : : locktag,
3005 : : hashcode,
3006 : : HASH_FIND,
3007 : : NULL);
3008 [ + + ]: 1232 : if (!lock)
3009 : : {
3010 : : /*
3011 : : * If the lock object doesn't exist, there is nothing holding a lock
3012 : : * on this lockable object.
3013 : : */
3014 : 70 : LWLockRelease(partitionLock);
42 heikki.linnakangas@i 3015 :GNC 70 : vxids[count].procNumber = INVALID_PROC_NUMBER;
3363 andres@anarazel.de 3016 :CBC 70 : vxids[count].localTransactionId = InvalidLocalTransactionId;
1839 alvherre@alvh.no-ip. 3017 [ - + ]: 70 : if (countp)
1839 alvherre@alvh.no-ip. 3018 :UBC 0 : *countp = count;
6066 tgl@sss.pgh.pa.us 3019 :CBC 70 : return vxids;
3020 : : }
3021 : :
3022 : : /*
3023 : : * Examine each existing holder (or awaiter) of the lock.
3024 : : */
452 andres@anarazel.de 3025 [ + - + + ]: 2347 : dlist_foreach(proclock_iter, &lock->procLocks)
3026 : : {
3027 : 1185 : proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3028 : :
6440 tgl@sss.pgh.pa.us 3029 [ + + ]: 1185 : if (conflictMask & proclock->holdMask)
3030 : : {
6402 bruce@momjian.us 3031 : 1181 : PGPROC *proc = proclock->tag.myProc;
3032 : :
3033 : : /* A backend never blocks itself */
6440 tgl@sss.pgh.pa.us 3034 [ + + ]: 1181 : if (proc != MyProc)
3035 : : {
3036 : : VirtualTransactionId vxid;
3037 : :
6066 3038 : 23 : GET_VXID_FROM_PGPROC(vxid, *proc);
3039 : :
3040 [ + - ]: 23 : if (VirtualTransactionIdIsValid(vxid))
3041 : : {
3042 : : int i;
3043 : :
3044 : : /* Avoid duplicate entries. */
4705 rhaas@postgresql.org 3045 [ + + ]: 34 : for (i = 0; i < fast_count; ++i)
3046 [ + + - + ]: 11 : if (VirtualTransactionIdEquals(vxids[i], vxid))
4705 rhaas@postgresql.org 3047 :UBC 0 : break;
4705 rhaas@postgresql.org 3048 [ + - ]:CBC 23 : if (i >= fast_count)
3049 : 23 : vxids[count++] = vxid;
3050 : : }
3051 : : /* else, xact already committed or aborted */
3052 : : }
3053 : : }
3054 : : }
3055 : :
6440 tgl@sss.pgh.pa.us 3056 : 1162 : LWLockRelease(partitionLock);
3057 : :
733 rhaas@postgresql.org 3058 [ - + ]: 1162 : if (count > MaxBackends + max_prepared_xacts) /* should never happen */
6066 tgl@sss.pgh.pa.us 3059 [ # # ]:UBC 0 : elog(PANIC, "too many conflicting locks found");
3060 : :
42 heikki.linnakangas@i 3061 :GNC 1162 : vxids[count].procNumber = INVALID_PROC_NUMBER;
3363 andres@anarazel.de 3062 :CBC 1162 : vxids[count].localTransactionId = InvalidLocalTransactionId;
1839 alvherre@alvh.no-ip. 3063 [ + + ]: 1162 : if (countp)
3064 : 1159 : *countp = count;
6066 tgl@sss.pgh.pa.us 3065 : 1162 : return vxids;
3066 : : }
3067 : :
3068 : : /*
3069 : : * Find a lock in the shared lock table and release it. It is the caller's
3070 : : * responsibility to verify that this is a sane thing to do. (For example, it
3071 : : * would be bad to release a lock here if there might still be a LOCALLOCK
3072 : : * object with pointers to it.)
3073 : : *
3074 : : * We currently use this in two situations: first, to release locks held by
3075 : : * prepared transactions on commit (see lock_twophase_postcommit); and second,
3076 : : * to release locks taken via the fast-path, transferred to the main hash
3077 : : * table, and then released (see LockReleaseAll).
3078 : : */
3079 : : static void
4705 rhaas@postgresql.org 3080 : 2135 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3081 : : LOCKTAG *locktag, LOCKMODE lockmode,
3082 : : bool decrement_strong_lock_count)
3083 : : {
3084 : : LOCK *lock;
3085 : : PROCLOCK *proclock;
3086 : : PROCLOCKTAG proclocktag;
3087 : : uint32 hashcode;
3088 : : uint32 proclock_hashcode;
3089 : : LWLock *partitionLock;
3090 : : bool wakeupNeeded;
3091 : :
3092 : 2135 : hashcode = LockTagHashCode(locktag);
3093 : 2135 : partitionLock = LockHashPartitionLock(hashcode);
3094 : :
3095 : 2135 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3096 : :
3097 : : /*
3098 : : * Re-find the lock object (it had better be there).
3099 : : */
3100 : 2135 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3101 : : locktag,
3102 : : hashcode,
3103 : : HASH_FIND,
3104 : : NULL);
3105 [ - + ]: 2135 : if (!lock)
4705 rhaas@postgresql.org 3106 [ # # ]:UBC 0 : elog(PANIC, "failed to re-find shared lock object");
3107 : :
3108 : : /*
3109 : : * Re-find the proclock object (ditto).
3110 : : */
4705 rhaas@postgresql.org 3111 :CBC 2135 : proclocktag.myLock = lock;
3112 : 2135 : proclocktag.myProc = proc;
3113 : :
3114 : 2135 : proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3115 : :
3116 : 2135 : proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3117 : : &proclocktag,
3118 : : proclock_hashcode,
3119 : : HASH_FIND,
3120 : : NULL);
3121 [ - + ]: 2135 : if (!proclock)
4705 rhaas@postgresql.org 3122 [ # # ]:UBC 0 : elog(PANIC, "failed to re-find shared proclock object");
3123 : :
3124 : : /*
3125 : : * Double-check that we are actually holding a lock of the type we want to
3126 : : * release.
3127 : : */
4705 rhaas@postgresql.org 3128 [ - + ]:CBC 2135 : if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3129 : : {
3130 : : PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
4705 rhaas@postgresql.org 3131 :UBC 0 : LWLockRelease(partitionLock);
3132 [ # # ]: 0 : elog(WARNING, "you don't own a lock of type %s",
3133 : : lockMethodTable->lockModeNames[lockmode]);
3134 : 0 : return;
3135 : : }
3136 : :
3137 : : /*
3138 : : * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3139 : : */
4705 rhaas@postgresql.org 3140 :CBC 2135 : wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3141 : :
3142 : 2135 : CleanUpLock(lock, proclock,
3143 : : lockMethodTable, hashcode,
3144 : : wakeupNeeded);
3145 : :
3146 : 2135 : LWLockRelease(partitionLock);
3147 : :
3148 : : /*
3149 : : * Decrement strong lock count. This logic is needed only for 2PC.
3150 : : */
3151 [ + + ]: 2135 : if (decrement_strong_lock_count
3552 3152 [ + - + + : 912 : && ConflictsWithRelationFastPath(locktag, lockmode))
+ - + + ]
3153 : : {
4326 bruce@momjian.us 3154 : 64 : uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3155 : :
4653 rhaas@postgresql.org 3156 [ - + ]: 64 : SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3660 3157 [ - + ]: 64 : Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
4653 3158 : 64 : FastPathStrongRelationLocks->count[fasthashcode]--;
3159 : 64 : SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3160 : : }
3161 : : }
3162 : :
3163 : : /*
3164 : : * CheckForSessionAndXactLocks
3165 : : * Check to see if transaction holds both session-level and xact-level
3166 : : * locks on the same object; if so, throw an error.
3167 : : *
3168 : : * If we have both session- and transaction-level locks on the same object,
3169 : : * PREPARE TRANSACTION must fail. This should never happen with regular
3170 : : * locks, since we only take those at session level in some special operations
3171 : : * like VACUUM. It's possible to hit this with advisory locks, though.
3172 : : *
3173 : : * It would be nice if we could keep the session hold and give away the
3174 : : * transactional hold to the prepared xact. However, that would require two
3175 : : * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3176 : : * available when it comes time for PostPrepare_Locks to do the deed.
3177 : : * So for now, we error out while we can still do so safely.
3178 : : *
3179 : : * Since the LOCALLOCK table stores a separate entry for each lockmode,
3180 : : * we can't implement this check by examining LOCALLOCK entries in isolation.
3181 : : * We must build a transient hashtable that is indexed by locktag only.
3182 : : */
3183 : : static void
995 tgl@sss.pgh.pa.us 3184 : 393 : CheckForSessionAndXactLocks(void)
3185 : : {
3186 : : typedef struct
3187 : : {
3188 : : LOCKTAG lock; /* identifies the lockable object */
3189 : : bool sessLock; /* is any lockmode held at session level? */
3190 : : bool xactLock; /* is any lockmode held at xact level? */
3191 : : } PerLockTagEntry;
3192 : :
3193 : : HASHCTL hash_ctl;
3194 : : HTAB *lockhtab;
3195 : : HASH_SEQ_STATUS status;
3196 : : LOCALLOCK *locallock;
3197 : :
3198 : : /* Create a local hash table keyed by LOCKTAG only */
3199 : 393 : hash_ctl.keysize = sizeof(LOCKTAG);
3200 : 393 : hash_ctl.entrysize = sizeof(PerLockTagEntry);
3201 : 393 : hash_ctl.hcxt = CurrentMemoryContext;
3202 : :
3203 : 393 : lockhtab = hash_create("CheckForSessionAndXactLocks table",
3204 : : 256, /* arbitrary initial size */
3205 : : &hash_ctl,
3206 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3207 : :
3208 : : /* Scan local lock table to find entries for each LOCKTAG */
3209 : 393 : hash_seq_init(&status, LockMethodLocalHash);
3210 : :
3211 [ + + ]: 1302 : while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3212 : : {
3213 : 911 : LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3214 : : PerLockTagEntry *hentry;
3215 : : bool found;
3216 : : int i;
3217 : :
3218 : : /*
3219 : : * Ignore VXID locks. We don't want those to be held by prepared
3220 : : * transactions, since they aren't meaningful after a restart.
3221 : : */
3222 [ - + ]: 911 : if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
995 tgl@sss.pgh.pa.us 3223 :UBC 0 : continue;
3224 : :
3225 : : /* Ignore it if we don't actually hold the lock */
995 tgl@sss.pgh.pa.us 3226 [ - + ]:CBC 911 : if (locallock->nLocks <= 0)
995 tgl@sss.pgh.pa.us 3227 :UBC 0 : continue;
3228 : :
3229 : : /* Otherwise, find or make an entry in lockhtab */
995 tgl@sss.pgh.pa.us 3230 :CBC 911 : hentry = (PerLockTagEntry *) hash_search(lockhtab,
433 peter@eisentraut.org 3231 : 911 : &locallock->tag.lock,
3232 : : HASH_ENTER, &found);
995 tgl@sss.pgh.pa.us 3233 [ + + ]: 911 : if (!found) /* initialize, if newly created */
3234 : 867 : hentry->sessLock = hentry->xactLock = false;
3235 : :
3236 : : /* Scan to see if we hold lock at session or xact level or both */
3237 [ + + ]: 1822 : for (i = locallock->numLockOwners - 1; i >= 0; i--)
3238 : : {
3239 [ + + ]: 911 : if (lockOwners[i].owner == NULL)
3240 : 12 : hentry->sessLock = true;
3241 : : else
3242 : 899 : hentry->xactLock = true;
3243 : : }
3244 : :
3245 : : /*
3246 : : * We can throw error immediately when we see both types of locks; no
3247 : : * need to wait around to see if there are more violations.
3248 : : */
3249 [ + + + + ]: 911 : if (hentry->sessLock && hentry->xactLock)
3250 [ + - ]: 2 : ereport(ERROR,
3251 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3252 : : errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3253 : : }
3254 : :
3255 : : /* Success, so clean up */
3256 : 391 : hash_destroy(lockhtab);
3257 : 391 : }
3258 : :
/*
 * AtPrepare_Locks
 *		Do the preparatory work for a PREPARE: make 2PC state file records
 *		for all locks currently held.
 *
 * Session-level locks are ignored, as are VXID locks.
 *
 * For the most part, we don't need to touch shared memory for this ---
 * all the necessary state information is in the locallock table.
 * Fast-path locks are an exception, however: we move any such locks to
 * the main table before allowing PREPARE TRANSACTION to succeed.
 *
 * Errors raised here (including by CheckForSessionAndXactLocks) abort the
 * PREPARE; nothing has been made irreversible yet at this point.
 */
void
AtPrepare_Locks(void)
{
	HASH_SEQ_STATUS status;
	LOCALLOCK  *locallock;

	/* First, verify there aren't locks of both xact and session level */
	CheckForSessionAndXactLocks();

	/* Now do the per-locallock cleanup work */
	hash_seq_init(&status, LockMethodLocalHash);

	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
	{
		TwoPhaseLockRecord record;
		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
		bool		haveSessionLock;
		bool		haveXactLock;
		int			i;

		/*
		 * Ignore VXID locks.  We don't want those to be held by prepared
		 * transactions, since they aren't meaningful after a restart.
		 */
		if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
			continue;

		/* Ignore it if we don't actually hold the lock */
		if (locallock->nLocks <= 0)
			continue;

		/* Scan to see whether we hold it at session or transaction level */
		haveSessionLock = haveXactLock = false;
		for (i = locallock->numLockOwners - 1; i >= 0; i--)
		{
			/* a NULL owner marks a session-level hold */
			if (lockOwners[i].owner == NULL)
				haveSessionLock = true;
			else
				haveXactLock = true;
		}

		/* Ignore it if we have only session lock */
		if (!haveXactLock)
			continue;

		/* This can't happen, because we already checked it */
		if (haveSessionLock)
			ereport(ERROR,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));

		/*
		 * If the local lock was taken via the fast-path, we need to move it
		 * to the primary lock table, or just get a pointer to the existing
		 * primary lock table entry if by chance it's already been
		 * transferred.
		 */
		if (locallock->proclock == NULL)
		{
			locallock->proclock = FastPathGetRelationLockEntry(locallock);
			locallock->lock = locallock->proclock->tag.myLock;
		}

		/*
		 * Arrange to not release any strong lock count held by this lock
		 * entry.  We must retain the count until the prepared transaction is
		 * committed or rolled back.  (LockRefindAndRelease decrements it
		 * then, when called with decrement_strong_lock_count = true.)
		 */
		locallock->holdsStrongLockCount = false;

		/*
		 * Create a 2PC record.  It carries just the locktag and mode; the
		 * recovery side re-acquires the lock on behalf of the dummy PGPROC.
		 */
		memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
		record.lockmode = locallock->tag.mode;

		RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
							   &record, sizeof(TwoPhaseLockRecord));
	}
}
3351 : :
/*
 * PostPrepare_Locks
 *		Clean up after successful PREPARE
 *
 * Here, we want to transfer ownership of our locks to a dummy PGPROC
 * that's now associated with the prepared transaction, and we want to
 * clean out the corresponding entries in the LOCALLOCK table.
 *
 * Note: by removing the LOCALLOCK entries, we are leaving dangling
 * pointers in the transaction's resource owner.  This is OK at the
 * moment since resowner.c doesn't try to free locks retail at a toplevel
 * transaction commit or abort.  We could alternatively zero out nLocks
 * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
 * but that probably costs more cycles.
 *
 * This runs inside a critical section: any failure here leaves shared lock
 * state inconsistent with the prepared transaction, so errors escalate to
 * a PANIC.
 */
void
PostPrepare_Locks(TransactionId xid)
{
	/* dummy PGPROC that will own the locks on behalf of the prepared xact */
	PGPROC	   *newproc = TwoPhaseGetDummyProc(xid, false);
	HASH_SEQ_STATUS status;
	LOCALLOCK  *locallock;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	PROCLOCKTAG proclocktag;
	int			partition;

	/* Can't prepare a lock group follower. */
	Assert(MyProc->lockGroupLeader == NULL ||
		   MyProc->lockGroupLeader == MyProc);

	/* This is a critical section: any error means big trouble */
	START_CRIT_SECTION();

	/*
	 * First we run through the locallock table and get rid of unwanted
	 * entries, then we scan the process's proclocks and transfer them to the
	 * target proc.
	 *
	 * We do this separately because we may have multiple locallock entries
	 * pointing to the same proclock, and we daren't end up with any dangling
	 * pointers.
	 */
	hash_seq_init(&status, LockMethodLocalHash);

	while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
	{
		LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
		bool		haveSessionLock;
		bool		haveXactLock;
		int			i;

		if (locallock->proclock == NULL || locallock->lock == NULL)
		{
			/*
			 * We must've run out of shared memory while trying to set up this
			 * lock.  Just forget the local entry.
			 */
			Assert(locallock->nLocks == 0);
			RemoveLocalLock(locallock);
			continue;
		}

		/* Ignore VXID locks */
		if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
			continue;

		/* Scan to see whether we hold it at session or transaction level */
		haveSessionLock = haveXactLock = false;
		for (i = locallock->numLockOwners - 1; i >= 0; i--)
		{
			/* a NULL owner marks a session-level hold */
			if (lockOwners[i].owner == NULL)
				haveSessionLock = true;
			else
				haveXactLock = true;
		}

		/* Ignore it if we have only session lock */
		if (!haveXactLock)
			continue;

		/* This can't happen, because we already checked it */
		if (haveSessionLock)
			ereport(PANIC,
					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));

		/* Mark the proclock to show we need to release this lockmode */
		if (locallock->nLocks > 0)
			locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);

		/* And remove the locallock hashtable entry */
		RemoveLocalLock(locallock);
	}

	/*
	 * Now, scan each lock partition separately.
	 */
	for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
	{
		LWLock	   *partitionLock;
		dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
		dlist_mutable_iter proclock_iter;

		partitionLock = LockHashPartitionLockByIndex(partition);

		/*
		 * If the proclock list for this partition is empty, we can skip
		 * acquiring the partition lock.  This optimization is safer than the
		 * situation in LockReleaseAll, because we got rid of any fast-path
		 * locks during AtPrepare_Locks, so there cannot be any case where
		 * another backend is adding something to our lists now.  For safety,
		 * though, we code this the same way as in LockReleaseAll.
		 */
		if (dlist_is_empty(procLocks))
			continue;			/* needn't examine this partition */

		LWLockAcquire(partitionLock, LW_EXCLUSIVE);

		dlist_foreach_modify(proclock_iter, procLocks)
		{
			proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);

			Assert(proclock->tag.myProc == MyProc);

			lock = proclock->tag.myLock;

			/* Ignore VXID locks */
			if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
				continue;

			PROCLOCK_PRINT("PostPrepare_Locks", proclock);
			LOCK_PRINT("PostPrepare_Locks", lock, 0);
			Assert(lock->nRequested >= 0);
			Assert(lock->nGranted >= 0);
			Assert(lock->nGranted <= lock->nRequested);
			Assert((proclock->holdMask & ~lock->grantMask) == 0);

			/* Ignore it if nothing to release (must be a session lock) */
			if (proclock->releaseMask == 0)
				continue;

			/* Else we should be releasing all locks */
			if (proclock->releaseMask != proclock->holdMask)
				elog(PANIC, "we seem to have dropped a bit somewhere");

			/*
			 * We cannot simply modify proclock->tag.myProc to reassign
			 * ownership of the lock, because that's part of the hash key and
			 * the proclock would then be in the wrong hash chain.  Instead
			 * use hash_update_hash_key.  (We used to create a new hash entry,
			 * but that risks out-of-memory failure if other processes are
			 * busy making proclocks too.)  We must unlink the proclock from
			 * our procLink chain and put it into the new proc's chain, too.
			 *
			 * Note: the updated proclock hash key will still belong to the
			 * same hash partition, cf proclock_hash().  So the partition lock
			 * we already hold is sufficient for this.
			 */
			dlist_delete(&proclock->procLink);

			/*
			 * Create the new hash key for the proclock.
			 */
			proclocktag.myLock = lock;
			proclocktag.myProc = newproc;

			/*
			 * Update groupLeader pointer to point to the new proc.  (We'd
			 * better not be a member of somebody else's lock group!)
			 */
			Assert(proclock->groupLeader == proclock->tag.myProc);
			proclock->groupLeader = newproc;

			/*
			 * Update the proclock.  We should not find any existing entry for
			 * the same hash key, since there can be only one entry for any
			 * given lock with my own proc.
			 */
			if (!hash_update_hash_key(LockMethodProcLockHash,
									  proclock,
									  &proclocktag))
				elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");

			/* Re-link into the new proc's proclock list */
			dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);

			PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
		}						/* loop over PROCLOCKs within this partition */

		LWLockRelease(partitionLock);
	}							/* loop over partitions */

	END_CRIT_SECTION();
}
3546 : :
3547 : :
3548 : : /*
3549 : : * Estimate shared-memory space used for lock tables
3550 : : */
3551 : : Size
3552 : 1679 : LockShmemSize(void)
3553 : : {
6701 3554 : 1679 : Size size = 0;
3555 : : long max_table_size;
3556 : :
3557 : : /* lock hash table */
6699 3558 : 1679 : max_table_size = NLOCKENTS();
6475 3559 : 1679 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3560 : :
3561 : : /* proclock hash table */
6699 3562 : 1679 : max_table_size *= 2;
6475 3563 : 1679 : size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3564 : :
3565 : : /*
3566 : : * Since NLOCKENTS is only an estimate, add 10% safety margin.
3567 : : */
6812 3568 : 1679 : size = add_size(size, size / 10);
3569 : :
9716 bruce@momjian.us 3570 : 1679 : return size;
3571 : : }
3572 : :
/*
 * GetLockStatusData - Return a summary of the lock manager's internal
 * status, for use in a user-level reporting function.
 *
 * The return data consists of an array of LockInstanceData objects,
 * which are a lightly abstracted version of the PROCLOCK data structures,
 * i.e. there is one entry for each unique lock and interested PGPROC.
 * It is the caller's responsibility to match up related items (such as
 * references to the same lockable object or PGPROC) if wanted.
 *
 * The design goal is to hold the LWLocks for as short a time as possible;
 * thus, this function simply makes a copy of the necessary data and releases
 * the locks, allowing the caller to contemplate and format the data for as
 * long as it pleases.
 *
 * The result is palloc'd in the caller's memory context; the caller owns it.
 */
LockData *
GetLockStatusData(void)
{
	LockData   *data;
	PROCLOCK   *proclock;
	HASH_SEQ_STATUS seqstat;
	int			els;			/* allocated capacity of data->locks */
	int			el;				/* number of entries filled so far */
	int			i;

	data = (LockData *) palloc(sizeof(LockData));

	/* Guess how much space we'll need. */
	els = MaxBackends;
	el = 0;
	data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);

	/*
	 * First, we iterate through the per-backend fast-path arrays, locking
	 * them one at a time.  This might produce an inconsistent picture of the
	 * system state, but taking all of those LWLocks at the same time seems
	 * impractical (in particular, note MAX_SIMUL_LWLOCKS).  It shouldn't
	 * matter too much, because none of these locks can be involved in lock
	 * conflicts anyway - anything that might must be present in the main lock
	 * table.  (For the same reason, we don't sweat about making leaderPid
	 * completely valid.  We cannot safely dereference another backend's
	 * lockGroupLeader field without holding all lock partition locks, and
	 * it's not worth that.)
	 */
	for (i = 0; i < ProcGlobal->allProcCount; ++i)
	{
		PGPROC	   *proc = &ProcGlobal->allProcs[i];
		uint32		f;

		LWLockAcquire(&proc->fpInfoLock, LW_SHARED);

		/* Copy out each in-use fast-path relation lock slot */
		for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
		{
			LockInstanceData *instance;
			uint32		lockbits = FAST_PATH_GET_BITS(proc, f);

			/* Skip unallocated slots. */
			if (!lockbits)
				continue;

			/* Grow the output array if it's full */
			if (el >= els)
			{
				els += MaxBackends;
				data->locks = (LockInstanceData *)
					repalloc(data->locks, sizeof(LockInstanceData) * els);
			}

			instance = &data->locks[el];
			SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
								 proc->fpRelId[f]);
			instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
			instance->waitLockMode = NoLock;
			instance->vxid.procNumber = proc->vxid.procNumber;
			instance->vxid.localTransactionId = proc->vxid.lxid;
			instance->pid = proc->pid;
			instance->leaderPid = proc->pid;
			instance->fastpath = true;

			/*
			 * Successfully taking fast path lock means there were no
			 * conflicting locks.
			 */
			instance->waitStart = 0;

			el++;
		}

		/* Also report the backend's fast-path VXID lock, if it holds one */
		if (proc->fpVXIDLock)
		{
			VirtualTransactionId vxid;
			LockInstanceData *instance;

			if (el >= els)
			{
				els += MaxBackends;
				data->locks = (LockInstanceData *)
					repalloc(data->locks, sizeof(LockInstanceData) * els);
			}

			vxid.procNumber = proc->vxid.procNumber;
			vxid.localTransactionId = proc->fpLocalTransactionId;

			instance = &data->locks[el];
			SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
			instance->holdMask = LOCKBIT_ON(ExclusiveLock);
			instance->waitLockMode = NoLock;
			instance->vxid.procNumber = proc->vxid.procNumber;
			instance->vxid.localTransactionId = proc->vxid.lxid;
			instance->pid = proc->pid;
			instance->leaderPid = proc->pid;
			instance->fastpath = true;
			instance->waitStart = 0;

			el++;
		}

		LWLockRelease(&proc->fpInfoLock);
	}

	/*
	 * Next, acquire lock on the entire shared lock data structure.  We do
	 * this so that, at least for locks in the primary lock table, the state
	 * will be self-consistent.
	 *
	 * Since this is a read-only operation, we take shared instead of
	 * exclusive lock.  There's not a whole lot of point to this, because all
	 * the normal operations require exclusive lock, but it doesn't hurt
	 * anything either.  It will at least allow two backends to do
	 * GetLockStatusData in parallel.
	 *
	 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
	 */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);

	/* Now we can safely count the number of proclocks */
	data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
	if (data->nelements > els)
	{
		els = data->nelements;
		data->locks = (LockInstanceData *)
			repalloc(data->locks, sizeof(LockInstanceData) * els);
	}

	/* Now scan the tables to copy the data */
	hash_seq_init(&seqstat, LockMethodProcLockHash);

	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
	{
		PGPROC	   *proc = proclock->tag.myProc;
		LOCK	   *lock = proclock->tag.myLock;
		LockInstanceData *instance = &data->locks[el];

		memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
		instance->holdMask = proclock->holdMask;
		/* Report the wait mode only if the proc is waiting on this lock */
		if (proc->waitLock == proclock->tag.myLock)
			instance->waitLockMode = proc->waitLockMode;
		else
			instance->waitLockMode = NoLock;
		instance->vxid.procNumber = proc->vxid.procNumber;
		instance->vxid.localTransactionId = proc->vxid.lxid;
		instance->pid = proc->pid;
		instance->leaderPid = proclock->groupLeader->pid;
		instance->fastpath = false;
		instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);

		el++;
	}

	/*
	 * And release locks.  We do this in reverse order for two reasons: (1)
	 * Anyone else who needs more than one of the locks will be trying to lock
	 * them in increasing order; we don't want to release the other process
	 * until it can get all the locks it needs.  (2) This avoids O(N^2)
	 * behavior inside LWLockRelease.
	 */
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(LockHashPartitionLockByIndex(i));

	Assert(el == data->nelements);

	return data;
}
3756 : :
3757 : : /*
3758 : : * GetBlockerStatusData - Return a summary of the lock manager's state
3759 : : * concerning locks that are blocking the specified PID or any member of
3760 : : * the PID's lock group, for use in a user-level reporting function.
3761 : : *
3762 : : * For each PID within the lock group that is awaiting some heavyweight lock,
3763 : : * the return data includes an array of LockInstanceData objects, which are
3764 : : * the same data structure used by GetLockStatusData; but unlike that function,
3765 : : * this one reports only the PROCLOCKs associated with the lock that that PID
3766 : : * is blocked on. (Hence, all the locktags should be the same for any one
3767 : : * blocked PID.) In addition, we return an array of the PIDs of those backends
3768 : : * that are ahead of the blocked PID in the lock's wait queue. These can be
3769 : : * compared with the PIDs in the LockInstanceData objects to determine which
3770 : : * waiters are ahead of or behind the blocked PID in the queue.
3771 : : *
3772 : : * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3773 : : * waiting on any heavyweight lock, return empty arrays.
3774 : : *
3775 : : * The design goal is to hold the LWLocks for as short a time as possible;
3776 : : * thus, this function simply makes a copy of the necessary data and releases
3777 : : * the locks, allowing the caller to contemplate and format the data for as
3778 : : * long as it pleases.
3779 : : */
BlockedProcsData *
GetBlockerStatusData(int blocked_pid)
{
	BlockedProcsData *data;
	PGPROC	   *proc;
	int			i;

	/* Result lives in the caller's memory context; caller frees. */
	data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));

	/*
	 * Guess how much space we'll need, and preallocate.  Most of the time
	 * this will avoid needing to do repalloc while holding the LWLocks.  (We
	 * assume, but check with an Assert, that MaxBackends is enough entries
	 * for the procs[] array; the other two could need enlargement, though.)
	 */
	data->nprocs = data->nlocks = data->npids = 0;
	data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
	data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
	data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
	data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);

	/*
	 * In order to search the ProcArray for blocked_pid and assume that that
	 * entry won't immediately disappear under us, we must hold ProcArrayLock.
	 * In addition, to examine the lock grouping fields of any other backend,
	 * we must hold all the hash partition locks.  (Only one of those locks is
	 * actually relevant for any one lock group, but we can't know which one
	 * ahead of time.)  It's fairly annoying to hold all those locks
	 * throughout this, but it's no worse than GetLockStatusData(), and it
	 * does have the advantage that we're guaranteed to return a
	 * self-consistent instantaneous state.
	 */
	LWLockAcquire(ProcArrayLock, LW_SHARED);

	proc = BackendPidGetProcWithLock(blocked_pid);

	/* Nothing to do if it's gone */
	if (proc != NULL)
	{
		/*
		 * Acquire lock on the entire shared lock data structure.  See notes
		 * in GetLockStatusData().  Partition locks are taken in index order
		 * to avoid LWLock deadlock against other all-partition lockers.
		 */
		for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
			LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);

		if (proc->lockGroupLeader == NULL)
		{
			/* Easy case, proc is not a lock group member */
			GetSingleProcBlockerStatusData(proc, data);
		}
		else
		{
			/*
			 * Examine all procs in proc's lock group; each waiting member
			 * contributes its own procs[] entry plus lock/waiter arrays.
			 */
			dlist_iter	iter;

			dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
			{
				PGPROC	   *memberProc;

				memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
				GetSingleProcBlockerStatusData(memberProc, data);
			}
		}

		/*
		 * And release locks.  See notes in GetLockStatusData().
		 */
		for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
			LWLockRelease(LockHashPartitionLockByIndex(i));

		/* MaxBackends entries must have sufficed for procs[]; see above */
		Assert(data->nprocs <= data->maxprocs);
	}

	LWLockRelease(ProcArrayLock);

	return data;
}
3858 : :
3859 : : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3860 : : static void
3861 : 2032 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3862 : : {
3863 : 2032 : LOCK *theLock = blocked_proc->waitLock;
3864 : : BlockedProcData *bproc;
3865 : : dlist_iter proclock_iter;
3866 : : dlist_iter proc_iter;
3867 : : dclist_head *waitQueue;
3868 : : int queue_size;
3869 : :
3870 : : /* Nothing to do if this proc is not blocked */
3871 [ + + ]: 2032 : if (theLock == NULL)
3872 : 913 : return;
3873 : :
3874 : : /* Set up a procs[] element */
3875 : 1119 : bproc = &data->procs[data->nprocs++];
3876 : 1119 : bproc->pid = blocked_proc->pid;
3877 : 1119 : bproc->first_lock = data->nlocks;
3878 : 1119 : bproc->first_waiter = data->npids;
3879 : :
3880 : : /*
3881 : : * We may ignore the proc's fast-path arrays, since nothing in those could
3882 : : * be related to a contended lock.
3883 : : */
3884 : :
3885 : : /* Collect all PROCLOCKs associated with theLock */
452 andres@anarazel.de 3886 [ + - + + ]: 3412 : dlist_foreach(proclock_iter, &theLock->procLocks)
3887 : : {
3888 : 2293 : PROCLOCK *proclock =
3889 : 2293 : dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
2974 tgl@sss.pgh.pa.us 3890 : 2293 : PGPROC *proc = proclock->tag.myProc;
3891 : 2293 : LOCK *lock = proclock->tag.myLock;
3892 : : LockInstanceData *instance;
3893 : :
3894 [ - + ]: 2293 : if (data->nlocks >= data->maxlocks)
3895 : : {
733 rhaas@postgresql.org 3896 :UBC 0 : data->maxlocks += MaxBackends;
2974 tgl@sss.pgh.pa.us 3897 : 0 : data->locks = (LockInstanceData *)
3898 : 0 : repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3899 : : }
3900 : :
2974 tgl@sss.pgh.pa.us 3901 :CBC 2293 : instance = &data->locks[data->nlocks];
3902 : 2293 : memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3903 : 2293 : instance->holdMask = proclock->holdMask;
3904 [ + + ]: 2293 : if (proc->waitLock == lock)
3905 : 1166 : instance->waitLockMode = proc->waitLockMode;
3906 : : else
3907 : 1127 : instance->waitLockMode = NoLock;
42 heikki.linnakangas@i 3908 :GNC 2293 : instance->vxid.procNumber = proc->vxid.procNumber;
3909 : 2293 : instance->vxid.localTransactionId = proc->vxid.lxid;
2974 tgl@sss.pgh.pa.us 3910 :CBC 2293 : instance->pid = proc->pid;
3911 : 2293 : instance->leaderPid = proclock->groupLeader->pid;
3912 : 2293 : instance->fastpath = false;
3913 : 2293 : data->nlocks++;
3914 : : }
3915 : :
3916 : : /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3917 : 1119 : waitQueue = &(theLock->waitProcs);
452 andres@anarazel.de 3918 : 1119 : queue_size = dclist_count(waitQueue);
3919 : :
2974 tgl@sss.pgh.pa.us 3920 [ - + ]: 1119 : if (queue_size > data->maxpids - data->npids)
3921 : : {
733 rhaas@postgresql.org 3922 :UBC 0 : data->maxpids = Max(data->maxpids + MaxBackends,
3923 : : data->npids + queue_size);
2974 tgl@sss.pgh.pa.us 3924 : 0 : data->waiter_pids = (int *) repalloc(data->waiter_pids,
3925 : 0 : sizeof(int) * data->maxpids);
3926 : : }
3927 : :
3928 : : /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
452 andres@anarazel.de 3929 [ + - + - ]:CBC 1142 : dclist_foreach(proc_iter, waitQueue)
3930 : : {
3931 : 1142 : PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
3932 : :
557 drowley@postgresql.o 3933 [ + + ]: 1142 : if (queued_proc == blocked_proc)
2974 tgl@sss.pgh.pa.us 3934 : 1119 : break;
557 drowley@postgresql.o 3935 : 23 : data->waiter_pids[data->npids++] = queued_proc->pid;
3936 : 23 : queued_proc = (PGPROC *) queued_proc->links.next;
3937 : : }
3938 : :
2974 tgl@sss.pgh.pa.us 3939 : 1119 : bproc->num_locks = data->nlocks - bproc->first_lock;
3940 : 1119 : bproc->num_waiters = data->npids - bproc->first_waiter;
3941 : : }
3942 : :
3943 : : /*
3944 : : * Returns a list of currently held AccessExclusiveLocks, for use by
3945 : : * LogStandbySnapshot(). The result is a palloc'd array,
3946 : : * with the number of elements returned into *nlocks.
3947 : : *
3948 : : * XXX This currently takes a lock on all partitions of the lock table,
3949 : : * but it's possible to do better. By reference counting locks and storing
3950 : : * the value in the ProcArray entry for each backend we could tell if any
3951 : : * locks need recording without having to acquire the partition locks and
3952 : : * scan the lock table. Whether that's worth the additional overhead
3953 : : * is pretty dubious though.
3954 : : */
xl_standby_lock *
GetRunningTransactionLocks(int *nlocks)
{
	xl_standby_lock *accessExclusiveLocks;
	PROCLOCK   *proclock;
	HASH_SEQ_STATUS seqstat;
	int			i;
	int			index;
	int			els;

	/*
	 * Acquire lock on the entire shared lock data structure.
	 *
	 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
	 */
	for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
		LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);

	/* Now we can safely count the number of proclocks */
	els = hash_get_num_entries(LockMethodProcLockHash);

	/*
	 * Allocating enough space for all locks in the lock table is overkill,
	 * but it's more convenient and faster than having to enlarge the array.
	 * The result is palloc'd in the caller's memory context.
	 */
	accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));

	/* Now scan the tables to copy the data */
	hash_seq_init(&seqstat, LockMethodProcLockHash);

	/*
	 * If lock is a currently granted AccessExclusiveLock then it will have
	 * just one proclock holder, so locks are never accessed twice in this
	 * particular case.  Don't copy this code for use elsewhere because in the
	 * general case this will give you duplicate locks when looking at
	 * non-exclusive lock types.
	 */
	index = 0;
	while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
	{
		/* make sure this definition matches the one used in LockAcquire */
		if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
			proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
		{
			PGPROC	   *proc = proclock->tag.myProc;
			LOCK	   *lock = proclock->tag.myLock;
			TransactionId xid = proc->xid;

			/*
			 * Don't record locks for transactions if we know they have
			 * already issued their WAL record for commit but not yet released
			 * lock.  It is still possible that we see locks held by already
			 * complete transactions, if they haven't yet zeroed their xids.
			 */
			if (!TransactionIdIsValid(xid))
				continue;

			accessExclusiveLocks[index].xid = xid;
			accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
			accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;

			index++;
		}
	}

	/* We can't have found more than were in the table when we counted */
	Assert(index <= els);

	/*
	 * And release locks.  We do this in reverse order for two reasons: (1)
	 * Anyone else who needs more than one of the locks will be trying to lock
	 * them in increasing order; we don't want to release the other process
	 * until it can get all the locks it needs.  (2) This avoids O(N^2)
	 * behavior inside LWLockRelease.
	 */
	for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
		LWLockRelease(LockHashPartitionLockByIndex(i));

	*nlocks = index;
	return accessExclusiveLocks;
}
4035 : :
4036 : : /* Provide the textual name of any lock mode */
4037 : : const char *
6701 tgl@sss.pgh.pa.us 4038 : 8624 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4039 : : {
4040 [ + - - + ]: 8624 : Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4041 [ + - - + ]: 8624 : Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4042 : 8624 : return LockMethods[lockmethodid]->lockModeNames[mode];
4043 : : }
4044 : :
4045 : : #ifdef LOCK_DEBUG
4046 : : /*
4047 : : * Dump all locks in the given proc's myProcLocks lists.
4048 : : *
4049 : : * Caller is responsible for having acquired appropriate LWLocks.
4050 : : */
4051 : : void
4052 : : DumpLocks(PGPROC *proc)
4053 : : {
4054 : : int i;
4055 : :
4056 : : if (proc == NULL)
4057 : : return;
4058 : :
4059 : : if (proc->waitLock)
4060 : : LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4061 : :
4062 : : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4063 : : {
4064 : : dlist_head *procLocks = &proc->myProcLocks[i];
4065 : : dlist_iter iter;
4066 : :
4067 : : dlist_foreach(iter, procLocks)
4068 : : {
4069 : : PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4070 : : LOCK *lock = proclock->tag.myLock;
4071 : :
4072 : : Assert(proclock->tag.myProc == proc);
4073 : : PROCLOCK_PRINT("DumpLocks", proclock);
4074 : : LOCK_PRINT("DumpLocks", lock, 0);
4075 : : }
4076 : : }
4077 : : }
4078 : :
4079 : : /*
4080 : : * Dump all lmgr locks.
4081 : : *
4082 : : * Caller is responsible for having acquired appropriate LWLocks.
4083 : : */
4084 : : void
4085 : : DumpAllLocks(void)
4086 : : {
4087 : : PGPROC *proc;
4088 : : PROCLOCK *proclock;
4089 : : LOCK *lock;
4090 : : HASH_SEQ_STATUS status;
4091 : :
4092 : : proc = MyProc;
4093 : :
4094 : : if (proc && proc->waitLock)
4095 : : LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4096 : :
4097 : : hash_seq_init(&status, LockMethodProcLockHash);
4098 : :
4099 : : while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4100 : : {
4101 : : PROCLOCK_PRINT("DumpAllLocks", proclock);
4102 : :
4103 : : lock = proclock->tag.myLock;
4104 : : if (lock)
4105 : : LOCK_PRINT("DumpAllLocks", lock, 0);
4106 : : else
4107 : : elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4108 : : }
4109 : : }
4110 : : #endif /* LOCK_DEBUG */
4111 : :
4112 : : /*
4113 : : * LOCK 2PC resource manager's routines
4114 : : */
4115 : :
4116 : : /*
4117 : : * Re-acquire a lock belonging to a transaction that was prepared.
4118 : : *
4119 : : * Because this function is run at db startup, re-acquiring the locks should
4120 : : * never conflict with running transactions because there are none. We
4121 : : * assume that the lock state represented by the stored 2PC files is legal.
4122 : : *
4123 : : * When switching from Hot Standby mode to normal operation, the locks will
4124 : : * be already held by the startup process. The locks are acquired for the new
4125 : : * procs without checking for conflicts, so we don't get a conflict between the
4126 : : * startup process and the dummy procs, even though we will momentarily have
4127 : : * a situation where two procs are holding the same AccessExclusiveLock,
4128 : : * which isn't normally possible because the conflict. If we're in standby
4129 : : * mode, but a recovery snapshot hasn't been established yet, it's possible
4130 : : * that some but not all of the locks are already held by the startup process.
4131 : : *
4132 : : * This approach is simple, but also a bit dangerous, because if there isn't
4133 : : * enough shared memory to acquire the locks, an error will be thrown, which
4134 : : * is promoted to FATAL and recovery will abort, bringing down postmaster.
4135 : : * A safer approach would be to transfer the locks like we do in
4136 : : * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4137 : : * read-only backends to use up all the shared lock memory anyway, so that
4138 : : * replaying the WAL record that needs to acquire a lock will throw an error
4139 : : * and PANIC anyway.
4140 : : */
void
lock_twophase_recover(TransactionId xid, uint16 info,
					  void *recdata, uint32 len)
{
	TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
	PGPROC	   *proc = TwoPhaseGetDummyProc(xid, false);
	LOCKTAG    *locktag;
	LOCKMODE	lockmode;
	LOCKMETHODID lockmethodid;
	LOCK	   *lock;
	PROCLOCK   *proclock;
	PROCLOCKTAG proclocktag;
	bool		found;
	uint32		hashcode;
	uint32		proclock_hashcode;
	int			partition;
	LWLock	   *partitionLock;
	LockMethod	lockMethodTable;

	/* Decode the 2PC record into locktag/lockmode/lockmethod */
	Assert(len == sizeof(TwoPhaseLockRecord));
	locktag = &rec->locktag;
	lockmode = rec->lockmode;
	lockmethodid = locktag->locktag_lockmethodid;

	if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
		elog(ERROR, "unrecognized lock method: %d", lockmethodid);
	lockMethodTable = LockMethods[lockmethodid];

	/* Identify which hash partition this lock tag falls into */
	hashcode = LockTagHashCode(locktag);
	partition = LockHashPartition(hashcode);
	partitionLock = LockHashPartitionLock(hashcode);

	/* All shared hashtable manipulation below requires the partition lock */
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/*
	 * Find or create a lock with this tag.
	 */
	lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
												locktag,
												hashcode,
												HASH_ENTER_NULL,
												&found);
	/* NULL means we ran out of shared memory (HASH_ENTER_NULL semantics) */
	if (!lock)
	{
		LWLockRelease(partitionLock);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory"),
				 errhint("You might need to increase %s.", "max_locks_per_transaction")));
	}

	/*
	 * if it's a new lock object, initialize it
	 */
	if (!found)
	{
		lock->grantMask = 0;
		lock->waitMask = 0;
		dlist_init(&lock->procLocks);
		dclist_init(&lock->waitProcs);
		lock->nRequested = 0;
		lock->nGranted = 0;
		MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
		MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
		LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
	}
	else
	{
		LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
		Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
		Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
		Assert(lock->nGranted <= lock->nRequested);
	}

	/*
	 * Create the hash key for the proclock table.
	 */
	proclocktag.myLock = lock;
	proclocktag.myProc = proc;

	proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);

	/*
	 * Find or create a proclock entry with this tag
	 */
	proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
														&proclocktag,
														proclock_hashcode,
														HASH_ENTER_NULL,
														&found);
	if (!proclock)
	{
		/* Oops, not enough shmem for the proclock */
		if (lock->nRequested == 0)
		{
			/*
			 * There are no other requestors of this lock, so garbage-collect
			 * the lock object.  We *must* do this to avoid a permanent leak
			 * of shared memory, because there won't be anything to cause
			 * anyone to release the lock object later.
			 */
			Assert(dlist_is_empty(&lock->procLocks));
			if (!hash_search_with_hash_value(LockMethodLockHash,
											 &(lock->tag),
											 hashcode,
											 HASH_REMOVE,
											 NULL))
				elog(PANIC, "lock table corrupted");
		}
		LWLockRelease(partitionLock);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of shared memory"),
				 errhint("You might need to increase %s.", "max_locks_per_transaction")));
	}

	/*
	 * If new, initialize the new entry
	 */
	if (!found)
	{
		Assert(proc->lockGroupLeader == NULL);
		proclock->groupLeader = proc;
		proclock->holdMask = 0;
		proclock->releaseMask = 0;
		/* Add proclock to appropriate lists */
		dlist_push_tail(&lock->procLocks, &proclock->lockLink);
		dlist_push_tail(&proc->myProcLocks[partition],
						&proclock->procLink);
		PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
	}
	else
	{
		PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
		Assert((proclock->holdMask & ~lock->grantMask) == 0);
	}

	/*
	 * lock->nRequested and lock->requested[] count the total number of
	 * requests, whether granted or waiting, so increment those immediately.
	 */
	lock->nRequested++;
	lock->requested[lockmode]++;
	Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));

	/*
	 * We shouldn't already hold the desired lock.
	 */
	if (proclock->holdMask & LOCKBIT_ON(lockmode))
		elog(ERROR, "lock %s on object %u/%u/%u is already held",
			 lockMethodTable->lockModeNames[lockmode],
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3);

	/*
	 * We ignore any possible conflicts and just grant ourselves the lock. Not
	 * only because we don't bother, but also to avoid deadlocks when
	 * switching from standby to normal mode. See function comment.
	 */
	GrantLock(lock, proclock, lockmode);

	/*
	 * Bump strong lock count, to make sure any fast-path lock requests won't
	 * be granted without consulting the primary lock table.
	 */
	if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
	{
		uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);

		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
		FastPathStrongRelationLocks->count[fasthashcode]++;
		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
	}

	LWLockRelease(partitionLock);
}
4317 : :
4318 : : /*
4319 : : * Re-acquire a lock belonging to a transaction that was prepared, when
4320 : : * starting up into hot standby mode.
4321 : : */
4322 : : void
5230 simon@2ndQuadrant.co 4323 :UBC 0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
4324 : : void *recdata, uint32 len)
4325 : : {
4326 : 0 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4327 : : LOCKTAG *locktag;
4328 : : LOCKMODE lockmode;
4329 : : LOCKMETHODID lockmethodid;
4330 : :
4331 [ # # ]: 0 : Assert(len == sizeof(TwoPhaseLockRecord));
4332 : 0 : locktag = &rec->locktag;
4333 : 0 : lockmode = rec->lockmode;
4334 : 0 : lockmethodid = locktag->locktag_lockmethodid;
4335 : :
4336 [ # # # # ]: 0 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4337 [ # # ]: 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4338 : :
4339 [ # # ]: 0 : if (lockmode == AccessExclusiveLock &&
4340 [ # # ]: 0 : locktag->locktag_type == LOCKTAG_RELATION)
4341 : : {
4342 : 0 : StandbyAcquireAccessExclusiveLock(xid,
4343 : : locktag->locktag_field1 /* dboid */ ,
4344 : : locktag->locktag_field2 /* reloid */ );
4345 : : }
4346 : 0 : }
4347 : :
4348 : :
4349 : : /*
4350 : : * 2PC processing routine for COMMIT PREPARED case.
4351 : : *
4352 : : * Find and release the lock indicated by the 2PC record.
4353 : : */
4354 : : void
6876 tgl@sss.pgh.pa.us 4355 :CBC 912 : lock_twophase_postcommit(TransactionId xid, uint16 info,
4356 : : void *recdata, uint32 len)
4357 : : {
4358 : 912 : TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
1875 michael@paquier.xyz 4359 : 912 : PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4360 : : LOCKTAG *locktag;
4361 : : LOCKMETHODID lockmethodid;
4362 : : LockMethod lockMethodTable;
4363 : :
6876 tgl@sss.pgh.pa.us 4364 [ - + ]: 912 : Assert(len == sizeof(TwoPhaseLockRecord));
4365 : 912 : locktag = &rec->locktag;
4366 : 912 : lockmethodid = locktag->locktag_lockmethodid;
4367 : :
6701 4368 [ + - - + ]: 912 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
6876 tgl@sss.pgh.pa.us 4369 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
6701 tgl@sss.pgh.pa.us 4370 :CBC 912 : lockMethodTable = LockMethods[lockmethodid];
4371 : :
4705 rhaas@postgresql.org 4372 : 912 : LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
6876 tgl@sss.pgh.pa.us 4373 : 912 : }
4374 : :
4375 : : /*
4376 : : * 2PC processing routine for ROLLBACK PREPARED case.
4377 : : *
4378 : : * This is actually just the same as the COMMIT case.
4379 : : */
void
lock_twophase_postabort(TransactionId xid, uint16 info,
						void *recdata, uint32 len)
{
	/* Releasing a prepared xact's lock works the same on abort as on commit */
	lock_twophase_postcommit(xid, info, recdata, len);
}
4386 : :
4387 : : /*
4388 : : * VirtualXactLockTableInsert
4389 : : *
4390 : : * Take vxid lock via the fast-path. There can't be any pre-existing
4391 : : * lockers, as we haven't advertised this vxid via the ProcArray yet.
4392 : : *
4393 : : * Since MyProc->fpLocalTransactionId will normally contain the same data
4394 : : * as MyProc->vxid.lxid, you might wonder if we really need both. The
4395 : : * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4396 : : * examined by procarray.c, while fpLocalTransactionId is protected by
4397 : : * fpInfoLock and is used only by the locking subsystem. Doing it this
4398 : : * way makes it easier to verify that there are no funny race conditions.
4399 : : *
4400 : : * We don't bother recording this lock in the local lock table, since it's
4401 : : * only ever released at the end of a transaction. Instead,
4402 : : * LockReleaseAll() calls VirtualXactLockTableCleanup().
4403 : : */
void
VirtualXactLockTableInsert(VirtualTransactionId vxid)
{
	Assert(VirtualTransactionIdIsValid(vxid));

	/* fpInfoLock protects the fast-path fields examined/modified below */
	LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);

	/* The vxid must be ours, and we must not already hold a fast-path VXID lock */
	Assert(MyProc->vxid.procNumber == vxid.procNumber);
	Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
	Assert(MyProc->fpVXIDLock == false);

	MyProc->fpVXIDLock = true;
	MyProc->fpLocalTransactionId = vxid.localTransactionId;

	LWLockRelease(&MyProc->fpInfoLock);
}
4420 : :
4421 : : /*
4422 : : * VirtualXactLockTableCleanup
4423 : : *
4424 : : * Check whether a VXID lock has been materialized; if so, release it,
4425 : : * unblocking waiters.
4426 : : */
void
VirtualXactLockTableCleanup(void)
{
	bool		fastpath;
	LocalTransactionId lxid;

	Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);

	/*
	 * Clean up shared memory state.  Snapshot the fast-path fields and clear
	 * them atomically with respect to other readers of fpInfoLock.
	 */
	LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);

	fastpath = MyProc->fpVXIDLock;
	lxid = MyProc->fpLocalTransactionId;
	MyProc->fpVXIDLock = false;
	MyProc->fpLocalTransactionId = InvalidLocalTransactionId;

	LWLockRelease(&MyProc->fpInfoLock);

	/*
	 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
	 * that means someone transferred the lock to the main lock table.
	 */
	if (!fastpath && LocalTransactionIdIsValid(lxid))
	{
		VirtualTransactionId vxid;
		LOCKTAG		locktag;

		/* Reconstruct the vxid's lock tag and release the transferred lock */
		vxid.procNumber = MyProcNumber;
		vxid.localTransactionId = lxid;
		SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);

		LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
							 &locktag, ExclusiveLock, false);
	}
}
4464 : :
4465 : : /*
4466 : : * XactLockForVirtualXact
4467 : : *
4468 : : * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4469 : : * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4470 : : * functions, it assumes "xid" is never a subtransaction and that "xid" is
4471 : : * prepared, committed, or aborted.
4472 : : *
4473 : : * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4474 : : * known as "vxid" before its PREPARE TRANSACTION.
4475 : : */
static bool
XactLockForVirtualXact(VirtualTransactionId vxid,
					   TransactionId xid, bool wait)
{
	bool		more = false;

	/* There is no point to wait for 2PCs if you have no 2PCs. */
	if (max_prepared_xacts == 0)
		return true;

	do
	{
		LockAcquireResult lar;
		LOCKTAG		tag;

		/* Clear state from previous iterations. */
		if (more)
		{
			xid = InvalidTransactionId;
			more = false;
		}

		/*
		 * If we have no xid, try to find one.  TwoPhaseGetXidByVirtualXID
		 * sets "more" when additional prepared XIDs match this vxid, in
		 * which case we loop to process them one at a time.
		 */
		if (!TransactionIdIsValid(xid))
			xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
		if (!TransactionIdIsValid(xid))
		{
			/* No (further) matching prepared xact: nothing left to wait for */
			Assert(!more);
			return true;
		}

		/*
		 * Check or wait for XID completion, using the standard xact-lock
		 * protocol: acquire then immediately release ShareLock on the XID.
		 * With wait=false, a failed conditional acquire means it's running.
		 */
		SET_LOCKTAG_TRANSACTION(tag, xid);
		lar = LockAcquire(&tag, ShareLock, false, !wait);
		if (lar == LOCKACQUIRE_NOT_AVAIL)
			return false;
		LockRelease(&tag, ShareLock, false);
	} while (more);

	return true;
}
4517 : :
/*
 * VirtualXactLock
 *
 * If wait = true, wait as long as the given VXID or any XID acquired by the
 * same transaction is still running.  Then, return true.
 *
 * If wait = false, just check whether that VXID or one of those XIDs is still
 * running, and return true or false.
 */
bool
VirtualXactLock(VirtualTransactionId vxid, bool wait)
{
	LOCKTAG		tag;
	PGPROC	   *proc;
	TransactionId xid = InvalidTransactionId;

	Assert(VirtualTransactionIdIsValid(vxid));

	if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
		/* no vxid lock; localTransactionId is a normal, locked XID */
		return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);

	SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);

	/*
	 * If a lock table entry must be made, this is the PGPROC on whose behalf
	 * it must be done.  Note that the transaction might end or the PGPROC
	 * might be reassigned to a new backend before we get around to examining
	 * it, but it doesn't matter.  If we find upon examination that the
	 * relevant lxid is no longer running here, that's enough to prove that
	 * it's no longer running anywhere.
	 */
	proc = ProcNumberGetProc(vxid.procNumber);
	if (proc == NULL)
		/* backend slot empty; only prepared XIDs could remain to wait on */
		return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);

	/*
	 * We must acquire this lock before checking the procNumber and lxid
	 * against the ones we're waiting for.  The target backend will only set
	 * or clear lxid while holding this lock.
	 */
	LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);

	if (proc->vxid.procNumber != vxid.procNumber
		|| proc->fpLocalTransactionId != vxid.localTransactionId)
	{
		/* VXID ended */
		LWLockRelease(&proc->fpInfoLock);
		return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
	}

	/*
	 * If we aren't asked to wait, there's no need to set up a lock table
	 * entry.  The transaction is still in progress, so just return false.
	 */
	if (!wait)
	{
		LWLockRelease(&proc->fpInfoLock);
		return false;
	}

	/*
	 * OK, we're going to need to sleep on the VXID.  But first, we must set
	 * up the primary lock table entry, if needed (ie, convert the proc's
	 * fast-path lock on its VXID to a regular lock).
	 */
	if (proc->fpVXIDLock)
	{
		PROCLOCK   *proclock;
		uint32		hashcode;
		LWLock	   *partitionLock;

		hashcode = LockTagHashCode(&tag);

		partitionLock = LockHashPartitionLock(hashcode);
		LWLockAcquire(partitionLock, LW_EXCLUSIVE);

		/* Create lock and proclock entries on the target proc's behalf */
		proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
									&tag, hashcode, ExclusiveLock);
		if (!proclock)
		{
			/* release both locks before reporting shared-memory exhaustion */
			LWLockRelease(partitionLock);
			LWLockRelease(&proc->fpInfoLock);
			ereport(ERROR,
					(errcode(ERRCODE_OUT_OF_MEMORY),
					 errmsg("out of shared memory"),
					 errhint("You might need to increase %s.", "max_locks_per_transaction")));
		}
		GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);

		LWLockRelease(partitionLock);

		/* fast-path slot no longer represents the lock; regular entry does */
		proc->fpVXIDLock = false;
	}

	/*
	 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
	 * search.  The proc might have assigned this XID but not yet locked it,
	 * in which case the proc will lock this XID before releasing the VXID.
	 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
	 * so we won't save an XID of a different VXID.  It doesn't matter whether
	 * we save this before or after setting up the primary lock table entry.
	 */
	xid = proc->xid;

	/* Done with proc->fpLockBits */
	LWLockRelease(&proc->fpInfoLock);

	/* Time to wait. */
	(void) LockAcquire(&tag, ShareLock, false, false);

	LockRelease(&tag, ShareLock, false);
	/* VXID is gone; now also wait for any associated prepared XID(s) */
	return XactLockForVirtualXact(vxid, xid, wait);
}
4632 : :
4633 : : /*
4634 : : * LockWaiterCount
4635 : : *
4636 : : * Find the number of lock requester on this locktag
4637 : : */
4638 : : int
2928 rhaas@postgresql.org 4639 : 57589 : LockWaiterCount(const LOCKTAG *locktag)
4640 : : {
4641 : 57589 : LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4642 : : LOCK *lock;
4643 : : bool found;
4644 : : uint32 hashcode;
4645 : : LWLock *partitionLock;
4646 : 57589 : int waiters = 0;
4647 : :
4648 [ + - - + ]: 57589 : if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2928 rhaas@postgresql.org 4649 [ # # ]:UBC 0 : elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4650 : :
2928 rhaas@postgresql.org 4651 :CBC 57589 : hashcode = LockTagHashCode(locktag);
4652 : 57589 : partitionLock = LockHashPartitionLock(hashcode);
4653 : 57589 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4654 : :
4655 : 57589 : lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4656 : : locktag,
4657 : : hashcode,
4658 : : HASH_FIND,
4659 : : &found);
4660 [ + + ]: 57589 : if (found)
4661 : : {
4662 [ - + ]: 19 : Assert(lock != NULL);
4663 : 19 : waiters = lock->nRequested;
4664 : : }
4665 : 57589 : LWLockRelease(partitionLock);
4666 : :
4667 : 57589 : return waiters;
4668 : : }
|