LCOV - differential code coverage report
Current view:    top level - src/backend/storage/lmgr - lock.c (source / functions)
Current:         Differential Code Coverage HEAD vs 15
Current Date:    2023-04-08 15:15:32
Baseline:        15
Baseline Date:   2023-04-08 15:09:40
Legend:          Lines: hit / not hit

             Coverage   Total  Hit   UNC  LBC  UIC  UBC  GBC  GIC  GNC  CBC  EUB  ECB  DUB  DCB
Lines:       89.1 %     1328   1183  4    22   59   60   23   636  47   477  55   642  7    42
Functions:   96.6 %     58     56    1  1  44  2  10  1  44

           TLA  Line data    Source code
       1                 : /*-------------------------------------------------------------------------
       2                 :  *
       3                 :  * lock.c
       4                 :  *    POSTGRES primary lock mechanism
       5                 :  *
       6                 :  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
       7                 :  * Portions Copyright (c) 1994, Regents of the University of California
       8                 :  *
       9                 :  *
      10                 :  * IDENTIFICATION
      11                 :  *    src/backend/storage/lmgr/lock.c
      12                 :  *
      13                 :  * NOTES
      14                 :  *    A lock table is a shared memory hash table.  When
      15                 :  *    a process tries to acquire a lock of a type that conflicts
      16                 :  *    with existing locks, it is put to sleep using the routines
      17                 :  *    in storage/lmgr/proc.c.
      18                 :  *
      19                 :  *    For the most part, this code should be invoked via lmgr.c
      20                 :  *    or another lock-management module, not directly.
      21                 :  *
      22                 :  *  Interface:
      23                 :  *
      24                 :  *  InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
      25                 :  *  LockAcquire(), LockRelease(), LockReleaseAll(),
      26                 :  *  LockCheckConflicts(), GrantLock()
      27                 :  *
      28                 :  *-------------------------------------------------------------------------
      29                 :  */
      30                 : #include "postgres.h"
      31                 : 
      32                 : #include <signal.h>
      33                 : #include <unistd.h>
      34                 : 
      35                 : #include "access/transam.h"
      36                 : #include "access/twophase.h"
      37                 : #include "access/twophase_rmgr.h"
      38                 : #include "access/xact.h"
      39                 : #include "access/xlog.h"
      40                 : #include "access/xlogutils.h"
      41                 : #include "miscadmin.h"
      42                 : #include "pg_trace.h"
      43                 : #include "pgstat.h"
      44                 : #include "storage/proc.h"
      45                 : #include "storage/procarray.h"
      46                 : #include "storage/sinvaladt.h"
      47                 : #include "storage/spin.h"
      48                 : #include "storage/standby.h"
      49                 : #include "utils/memutils.h"
      50                 : #include "utils/ps_status.h"
      51                 : #include "utils/resowner_private.h"
      52                 : 
      53                 : 
      54                 : /* This configuration variable is used to set the lock table size */
      55                 : int         max_locks_per_xact; /* set by guc.c */
      56                 : 
      57                 : #define NLOCKENTS() \
      58                 :     mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
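
For a rough sense of the scale this macro implies (the parameter values are illustrative assumptions, not taken from this build): with max_locks_per_xact = 64 and MaxBackends + max_prepared_xacts = 100, NLOCKENTS() requests room for 64 * 100 = 6400 lock-table entries. LockShmemSize() and InitLocks() (below) must size their tables from this same formula, as the comment in InitLocks() notes.
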
      59                 : 
      60                 : 
      61                 : /*
      62                 :  * Data structures defining the semantics of the standard lock methods.
      63                 :  *
      64                 :  * The conflict table defines the semantics of the various lock modes.
      65                 :  */
      66                 : static const LOCKMASK LockConflicts[] = {
      67                 :     0,
      68                 : 
      69                 :     /* AccessShareLock */
      70                 :     LOCKBIT_ON(AccessExclusiveLock),
      71                 : 
      72                 :     /* RowShareLock */
      73                 :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      74                 : 
      75                 :     /* RowExclusiveLock */
      76                 :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      77                 :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      78                 : 
      79                 :     /* ShareUpdateExclusiveLock */
      80                 :     LOCKBIT_ON(ShareUpdateExclusiveLock) |
      81                 :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      82                 :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      83                 : 
      84                 :     /* ShareLock */
      85                 :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      86                 :     LOCKBIT_ON(ShareRowExclusiveLock) |
      87                 :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      88                 : 
      89                 :     /* ShareRowExclusiveLock */
      90                 :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      91                 :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      92                 :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      93                 : 
      94                 :     /* ExclusiveLock */
      95                 :     LOCKBIT_ON(RowShareLock) |
      96                 :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
      97                 :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
      98                 :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
      99                 : 
     100                 :     /* AccessExclusiveLock */
     101                 :     LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
     102                 :     LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
     103                 :     LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
     104                 :     LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
     105                 : 
     106                 : };
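
Read row-wise, entry [mode] of this table is the bitmask of lock modes that conflict with that mode. A minimal sketch of the check this enables (the helper name is hypothetical; DoLockModesConflict() later in this file performs the same test through the lock method table):

    /* Sketch: does acquiring mode1 conflict with an already-granted mode2? */
    static bool
    modes_conflict(LOCKMODE mode1, LOCKMODE mode2)
    {
        return (LockConflicts[mode1] & LOCKBIT_ON(mode2)) != 0;
    }
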
     107                 : 
     108                 : /* Names of lock modes, for debug printouts */
     109                 : static const char *const lock_mode_names[] =
     110                 : {
     111                 :     "INVALID",
     112                 :     "AccessShareLock",
     113                 :     "RowShareLock",
     114                 :     "RowExclusiveLock",
     115                 :     "ShareUpdateExclusiveLock",
     116                 :     "ShareLock",
     117                 :     "ShareRowExclusiveLock",
     118                 :     "ExclusiveLock",
     119                 :     "AccessExclusiveLock"
     120                 : };
     121                 : 
     122                 : #ifndef LOCK_DEBUG
     123                 : static bool Dummy_trace = false;
     124                 : #endif
     125                 : 
     126                 : static const LockMethodData default_lockmethod = {
     127                 :     MaxLockMode,
     128                 :     LockConflicts,
     129                 :     lock_mode_names,
     130                 : #ifdef LOCK_DEBUG
     131                 :     &Trace_locks
     132                 : #else
     133                 :     &Dummy_trace
     134                 : #endif
     135                 : };
     136                 : 
     137                 : static const LockMethodData user_lockmethod = {
     138                 :     MaxLockMode,
     139                 :     LockConflicts,
     140                 :     lock_mode_names,
     141                 : #ifdef LOCK_DEBUG
     142                 :     &Trace_userlocks
     143                 : #else
     144                 :     &Dummy_trace
     145                 : #endif
     146                 : };
     147                 : 
     148                 : /*
     149                 :  * map from lock method id to the lock table data structures
     150                 :  */
     151                 : static const LockMethod LockMethods[] = {
     152                 :     NULL,
     153                 :     &default_lockmethod,
     154                 :     &user_lockmethod
     155                 : };
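
The LOCKMETHODID stored in a locktag indexes this array: DEFAULT_LOCKMETHOD (1) covers ordinary object locks and USER_LOCKMETHOD (2) covers advisory ("user") locks, which is how GetLocksMethodTable() and GetLockTagsMethodTable() below resolve a lock to its semantics.
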
     156                 : 
     157                 : 
     158                 : /* Record that's written to 2PC state file when a lock is persisted */
     159                 : typedef struct TwoPhaseLockRecord
     160                 : {
     161                 :     LOCKTAG     locktag;
     162                 :     LOCKMODE    lockmode;
     163                 : } TwoPhaseLockRecord;
     164                 : 
     165                 : 
     166                 : /*
     167                 :  * Count of the number of fast path lock slots we believe to be used.  This
     168                 :  * might be higher than the real number if another backend has transferred
     169                 :  * our locks to the primary lock table, but it can never be lower than the
     170                 :  * real value, since only we can acquire locks on our own behalf.
     171                 :  */
     172                 : static int  FastPathLocalUseCount = 0;
     173                 : 
     174                 : /*
     175                 :  * Flag to indicate if the relation extension lock is held by this backend.
     176                 :  * This flag is used to ensure that while holding the relation extension lock
     177                 :  * we don't try to acquire a heavyweight lock on any other object.  This
     178                 :  * restriction implies that the relation extension lock won't ever participate
     179                 :  * in the deadlock cycle because we can never wait for any other heavyweight
     180                 :  * lock after acquiring this lock.
     181                 :  *
     182                 :  * Such a restriction is okay for relation extension locks as unlike other
     183                 :  * heavyweight locks these are not held till the transaction end.  These are
     184                 :  * taken for a short duration to extend a particular relation and then
     185                 :  * released.
     186                 :  */
     187                 : static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
     188                 : 
     189                 : /*
     190                 :  * Flag to indicate if the page lock is held by this backend.  We don't
     191                 :  * acquire any other heavyweight lock while holding the page lock except for
     192                 :  * relation extension.  However, these locks are never taken in reverse order
     193                 :  * which implies that page locks will also never participate in the deadlock
     194                 :  * cycle.
     195                 :  *
     196                 :  * Similar to relation extension, page locks are also held for a short
     197                 :  * duration, so imposing such a restriction won't hurt.
     198                 :  */
     199                 : static bool IsPageLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
     200                 : 
     201                 : /* Macros for manipulating proc->fpLockBits */
     202                 : #define FAST_PATH_BITS_PER_SLOT         3
     203                 : #define FAST_PATH_LOCKNUMBER_OFFSET     1
     204                 : #define FAST_PATH_MASK                  ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
     205                 : #define FAST_PATH_GET_BITS(proc, n) \
     206                 :     (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
     207                 : #define FAST_PATH_BIT_POSITION(n, l) \
     208                 :     (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
     209                 :      AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
     210                 :      AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
     211                 :      ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
     212                 : #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
     213                 :      (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
     214                 : #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
     215                 :      (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
     216                 : #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
     217                 :      ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
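
As a worked illustration of the bit layout (slot and mode chosen arbitrarily; callers normally hold MyProc->fpInfoLock while touching these bits, as LockAcquireExtended() below does):

    /*
     * With FAST_PATH_BITS_PER_SLOT = 3 and FAST_PATH_LOCKNUMBER_OFFSET = 1,
     * slot n = 2 holding RowExclusiveLock (lockmode 3) occupies bit
     * (3 - 1) + 3 * 2 = 8 of proc->fpLockBits.
     */
    FAST_PATH_SET_LOCKMODE(MyProc, 2, RowExclusiveLock);    /* sets bit 8 */
    Assert(FAST_PATH_CHECK_LOCKMODE(MyProc, 2, RowExclusiveLock));
    FAST_PATH_CLEAR_LOCKMODE(MyProc, 2, RowExclusiveLock);  /* clears bit 8 */
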
     218                 : 
     219                 : /*
     220                 :  * The fast-path lock mechanism is concerned only with relation locks on
     221                 :  * unshared relations by backends bound to a database.  The fast-path
     222                 :  * mechanism exists mostly to accelerate acquisition and release of locks
     223                 :  * that rarely conflict.  Because ShareUpdateExclusiveLock is
     224                 :  * self-conflicting, it can't use the fast-path mechanism; but it also does
     225                 :  * not conflict with any of the locks that do, so we can ignore it completely.
     226                 :  */
     227                 : #define EligibleForRelationFastPath(locktag, mode) \
     228                 :     ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
     229                 :     (locktag)->locktag_type == LOCKTAG_RELATION && \
     230                 :     (locktag)->locktag_field1 == MyDatabaseId && \
     231                 :     MyDatabaseId != InvalidOid && \
     232                 :     (mode) < ShareUpdateExclusiveLock)
     233                 : #define ConflictsWithRelationFastPath(locktag, mode) \
     234                 :     ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
     235                 :     (locktag)->locktag_type == LOCKTAG_RELATION && \
     236                 :     (locktag)->locktag_field1 != InvalidOid && \
     237                 :     (mode) > ShareUpdateExclusiveLock)
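
In terms of the mode numbering used elsewhere in this file: EligibleForRelationFastPath() admits only AccessShareLock, RowShareLock and RowExclusiveLock (the modes below ShareUpdateExclusiveLock) on relations of the current database, while ConflictsWithRelationFastPath() matches ShareLock, ShareRowExclusiveLock, ExclusiveLock and AccessExclusiveLock, the "strong" modes; ShareUpdateExclusiveLock itself falls into neither set, per the comment above.
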
     238                 : 
     239                 : static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
     240                 : static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
     241                 : static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
     242                 :                                           const LOCKTAG *locktag, uint32 hashcode);
     243                 : static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
     244                 : 
     245                 : /*
     246                 :  * To make the fast-path lock mechanism work, we must have some way of
     247                 :  * preventing the use of the fast-path when a conflicting lock might be present.
      248                 :  * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
     249                 :  * and maintain an integer count of the number of "strong" lockers
     250                 :  * in each partition.  When any "strong" lockers are present (which is
     251                 :  * hopefully not very often), the fast-path mechanism can't be used, and we
     252                 :  * must fall back to the slower method of pushing matching locks directly
     253                 :  * into the main lock tables.
     254                 :  *
     255                 :  * The deadlock detector does not know anything about the fast path mechanism,
     256                 :  * so any locks that might be involved in a deadlock must be transferred from
     257                 :  * the fast-path queues to the main lock table.
     258                 :  */
     259                 : 
     260                 : #define FAST_PATH_STRONG_LOCK_HASH_BITS         10
     261                 : #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
     262                 :     (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
     263                 : #define FastPathStrongLockHashPartition(hashcode) \
     264                 :     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
     265                 : 
     266                 : typedef struct
     267                 : {
     268                 :     slock_t     mutex;
     269                 :     uint32      count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
     270                 : } FastPathStrongRelationLockData;
     271                 : 
     272                 : static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
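
A hedged sketch of the bookkeeping this structure supports (the helper name is hypothetical; the real increment happens in BeginStrongLockAcquire(), declared above and defined later in this file):

    /* Sketch: note one more "strong" locker in the partition for a locktag */
    static void
    count_strong_locker(uint32 hashcode)    /* hashcode from LockTagHashCode() */
    {
        uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);

        SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
        FastPathStrongRelationLocks->count[fasthashcode]++;
        SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    }
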
     273                 : 
     274                 : 
     275                 : /*
     276                 :  * Pointers to hash tables containing lock state
     277                 :  *
     278                 :  * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
     279                 :  * shared memory; LockMethodLocalHash is local to each backend.
     280                 :  */
     281                 : static HTAB *LockMethodLockHash;
     282                 : static HTAB *LockMethodProcLockHash;
     283                 : static HTAB *LockMethodLocalHash;
     284                 : 
     285                 : 
     286                 : /* private state for error cleanup */
     287                 : static LOCALLOCK *StrongLockInProgress;
     288                 : static LOCALLOCK *awaitedLock;
     289                 : static ResourceOwner awaitedOwner;
     290                 : 
     291                 : 
     292                 : #ifdef LOCK_DEBUG
     293                 : 
     294                 : /*------
     295                 :  * The following configuration options are available for lock debugging:
     296                 :  *
      297                 :  *     TRACE_LOCKS      -- give a bunch of output about what's going on in this file
     298                 :  *     TRACE_USERLOCKS  -- same but for user locks
     299                 :  *     TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
     300                 :  *                         (use to avoid output on system tables)
     301                 :  *     TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
     302                 :  *     DEBUG_DEADLOCKS  -- currently dumps locks at untimely occasions ;)
     303                 :  *
     304                 :  * Furthermore, but in storage/lmgr/lwlock.c:
     305                 :  *     TRACE_LWLOCKS    -- trace lightweight locks (pretty useless)
     306                 :  *
     307                 :  * Define LOCK_DEBUG at compile time to get all these enabled.
     308                 :  * --------
     309                 :  */
     310                 : 
     311                 : int         Trace_lock_oidmin = FirstNormalObjectId;
     312                 : bool        Trace_locks = false;
     313                 : bool        Trace_userlocks = false;
     314                 : int         Trace_lock_table = 0;
     315                 : bool        Debug_deadlocks = false;
     316                 : 
     317                 : 
     318                 : inline static bool
     319                 : LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
     320                 : {
     321                 :     return
     322                 :         (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
     323                 :          ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
     324                 :         || (Trace_lock_table &&
     325                 :             (tag->locktag_field2 == Trace_lock_table));
     326                 : }
     327                 : 
     328                 : 
     329                 : inline static void
     330                 : LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
     331                 : {
     332                 :     if (LOCK_DEBUG_ENABLED(&lock->tag))
     333                 :         elog(LOG,
     334                 :              "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
     335                 :              "req(%d,%d,%d,%d,%d,%d,%d)=%d "
     336                 :              "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
     337                 :              where, lock,
     338                 :              lock->tag.locktag_field1, lock->tag.locktag_field2,
     339                 :              lock->tag.locktag_field3, lock->tag.locktag_field4,
     340                 :              lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
     341                 :              lock->grantMask,
     342                 :              lock->requested[1], lock->requested[2], lock->requested[3],
     343                 :              lock->requested[4], lock->requested[5], lock->requested[6],
     344                 :              lock->requested[7], lock->nRequested,
     345                 :              lock->granted[1], lock->granted[2], lock->granted[3],
     346                 :              lock->granted[4], lock->granted[5], lock->granted[6],
     347                 :              lock->granted[7], lock->nGranted,
     348                 :              dclist_count(&lock->waitProcs),
     349                 :              LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
     350                 : }
     351                 : 
     352                 : 
     353                 : inline static void
     354                 : PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
     355                 : {
     356                 :     if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
     357                 :         elog(LOG,
     358                 :              "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
     359                 :              where, proclockP, proclockP->tag.myLock,
     360                 :              PROCLOCK_LOCKMETHOD(*(proclockP)),
     361                 :              proclockP->tag.myProc, (int) proclockP->holdMask);
     362                 : }
     363                 : #else                           /* not LOCK_DEBUG */
     364                 : 
     365                 : #define LOCK_PRINT(where, lock, type)  ((void) 0)
     366                 : #define PROCLOCK_PRINT(where, proclockP)  ((void) 0)
     367                 : #endif                          /* not LOCK_DEBUG */
     368                 : 
     369                 : 
     370                 : static uint32 proclock_hash(const void *key, Size keysize);
     371                 : static void RemoveLocalLock(LOCALLOCK *locallock);
     372                 : static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
     373                 :                                   const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
     374                 : static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
     375                 : static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
     376                 : static void FinishStrongLockAcquire(void);
     377                 : static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
     378                 : static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
     379                 : static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
     380                 : static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
     381                 :                         PROCLOCK *proclock, LockMethod lockMethodTable);
     382                 : static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
     383                 :                         LockMethod lockMethodTable, uint32 hashcode,
     384                 :                         bool wakeupNeeded);
     385                 : static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
     386                 :                                  LOCKTAG *locktag, LOCKMODE lockmode,
     387                 :                                  bool decrement_strong_lock_count);
     388                 : static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
     389                 :                                            BlockedProcsData *data);
     390                 : 
     391                 : 
     392                 : /*
     393                 :  * InitLocks -- Initialize the lock manager's data structures.
     394                 :  *
     395                 :  * This is called from CreateSharedMemoryAndSemaphores(), which see for
     396                 :  * more comments.  In the normal postmaster case, the shared hash tables
     397                 :  * are created here, as well as a locallock hash table that will remain
     398                 :  * unused and empty in the postmaster itself.  Backends inherit the pointers
     399                 :  * to the shared tables via fork(), and also inherit an image of the locallock
     400                 :  * hash table, which they proceed to use.  In the EXEC_BACKEND case, each
     401                 :  * backend re-executes this code to obtain pointers to the already existing
     402                 :  * shared hash tables and to create its locallock hash table.
     403                 :  */
     404                 : void
     405 CBC        1826 : InitLocks(void)
     406                 : {
     407                 :     HASHCTL     info;
     408                 :     long        init_table_size,
     409                 :                 max_table_size;
     410                 :     bool        found;
     411                 : 
     412                 :     /*
     413                 :      * Compute init/max size to request for lock hashtables.  Note these
     414                 :      * calculations must agree with LockShmemSize!
     415                 :      */
     416            1826 :     max_table_size = NLOCKENTS();
     417            1826 :     init_table_size = max_table_size / 2;
     418                 : 
     419                 :     /*
     420                 :      * Allocate hash table for LOCK structs.  This stores per-locked-object
     421                 :      * information.
     422                 :      */
     423            1826 :     info.keysize = sizeof(LOCKTAG);
     424            1826 :     info.entrysize = sizeof(LOCK);
     425            1826 :     info.num_partitions = NUM_LOCK_PARTITIONS;
     426                 : 
     427            1826 :     LockMethodLockHash = ShmemInitHash("LOCK hash",
     428                 :                                        init_table_size,
     429                 :                                        max_table_size,
     430                 :                                        &info,
     431                 :                                        HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
     432                 : 
     433                 :     /* Assume an average of 2 holders per lock */
     434            1826 :     max_table_size *= 2;
     435            1826 :     init_table_size *= 2;
     436                 : 
     437                 :     /*
     438                 :      * Allocate hash table for PROCLOCK structs.  This stores
     439                 :      * per-lock-per-holder information.
     440                 :      */
     441            1826 :     info.keysize = sizeof(PROCLOCKTAG);
     442            1826 :     info.entrysize = sizeof(PROCLOCK);
     443            1826 :     info.hash = proclock_hash;
     444            1826 :     info.num_partitions = NUM_LOCK_PARTITIONS;
     445                 : 
     446            1826 :     LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
     447                 :                                            init_table_size,
     448                 :                                            max_table_size,
     449                 :                                            &info,
     450                 :                                            HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
     451                 : 
     452                 :     /*
     453                 :      * Allocate fast-path structures.
     454                 :      */
     455            1826 :     FastPathStrongRelationLocks =
     456            1826 :         ShmemInitStruct("Fast Path Strong Relation Lock Data",
     457                 :                         sizeof(FastPathStrongRelationLockData), &found);
     458            1826 :     if (!found)
     459            1826 :         SpinLockInit(&FastPathStrongRelationLocks->mutex);
     460                 : 
     461                 :     /*
     462                 :      * Allocate non-shared hash table for LOCALLOCK structs.  This stores lock
     463                 :      * counts and resource owner information.
     464                 :      *
     465                 :      * The non-shared table could already exist in this process (this occurs
     466                 :      * when the postmaster is recreating shared memory after a backend crash).
     467                 :      * If so, delete and recreate it.  (We could simply leave it, since it
     468                 :      * ought to be empty in the postmaster, but for safety let's zap it.)
     469                 :      */
     470            1826 :     if (LockMethodLocalHash)
     471               4 :         hash_destroy(LockMethodLocalHash);
     472                 : 
     473            1826 :     info.keysize = sizeof(LOCALLOCKTAG);
     474            1826 :     info.entrysize = sizeof(LOCALLOCK);
     475                 : 
     476            1826 :     LockMethodLocalHash = hash_create("LOCALLOCK hash",
     477                 :                                       16,
     478                 :                                       &info,
     479                 :                                       HASH_ELEM | HASH_BLOBS);
     480            1826 : }
     481                 : 
     482                 : 
     483                 : /*
     484                 :  * Fetch the lock method table associated with a given lock
     485                 :  */
     486                 : LockMethod
     487              93 : GetLocksMethodTable(const LOCK *lock)
     488                 : {
     489              93 :     LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
     490                 : 
     491              93 :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
     492              93 :     return LockMethods[lockmethodid];
     493                 : }
     494                 : 
     495                 : /*
     496                 :  * Fetch the lock method table associated with a given locktag
     497                 :  */
     498                 : LockMethod
     499            1089 : GetLockTagsMethodTable(const LOCKTAG *locktag)
     500                 : {
     501            1089 :     LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
     502                 : 
     503            1089 :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
     504            1089 :     return LockMethods[lockmethodid];
     505                 : }
     506                 : 
     507                 : 
     508                 : /*
     509                 :  * Compute the hash code associated with a LOCKTAG.
     510                 :  *
     511                 :  * To avoid unnecessary recomputations of the hash code, we try to do this
     512                 :  * just once per function, and then pass it around as needed.  Aside from
     513                 :  * passing the hashcode to hash_search_with_hash_value(), we can extract
     514                 :  * the lock partition number from the hashcode.
     515                 :  */
     516                 : uint32
     517        27079921 : LockTagHashCode(const LOCKTAG *locktag)
     518                 : {
     519        27079921 :     return get_hash_value(LockMethodLockHash, (const void *) locktag);
     520                 : }
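
A hypothetical fragment showing the usual pattern (LockHashPartitionLock() is the lock.h macro this hashcode feeds; hash_search_with_hash_value() is the dynahash call mentioned above): compute the hashcode once, then use it both to pick the partition LWLock and to probe the shared table.

    uint32      hashcode = LockTagHashCode(locktag);
    LWLock     *partitionLock = LockHashPartitionLock(hashcode);

    LWLockAcquire(partitionLock, LW_SHARED);
    lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
                                                locktag, hashcode,
                                                HASH_FIND, NULL);
    LWLockRelease(partitionLock);
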
     521                 : 
     522                 : /*
     523                 :  * Compute the hash code associated with a PROCLOCKTAG.
     524                 :  *
     525                 :  * Because we want to use just one set of partition locks for both the
     526                 :  * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
     527                 :  * fall into the same partition number as their associated LOCKs.
     528                 :  * dynahash.c expects the partition number to be the low-order bits of
     529                 :  * the hash code, and therefore a PROCLOCKTAG's hash code must have the
     530                 :  * same low-order bits as the associated LOCKTAG's hash code.  We achieve
     531                 :  * this with this specialized hash function.
     532                 :  */
     533                 : static uint32
     534             799 : proclock_hash(const void *key, Size keysize)
     535                 : {
     536             799 :     const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
     537                 :     uint32      lockhash;
     538                 :     Datum       procptr;
     539                 : 
     540             799 :     Assert(keysize == sizeof(PROCLOCKTAG));
     541                 : 
     542                 :     /* Look into the associated LOCK object, and compute its hash code */
     543             799 :     lockhash = LockTagHashCode(&proclocktag->myLock->tag);
     544                 : 
     545                 :     /*
     546                 :      * To make the hash code also depend on the PGPROC, we xor the proc
     547                 :      * struct's address into the hash code, left-shifted so that the
     548                 :      * partition-number bits don't change.  Since this is only a hash, we
     549                 :      * don't care if we lose high-order bits of the address; use an
     550                 :      * intermediate variable to suppress cast-pointer-to-int warnings.
     551                 :      */
     552             799 :     procptr = PointerGetDatum(proclocktag->myProc);
     553             799 :     lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
     554                 : 
     555             799 :     return lockhash;
     556                 : }
     557                 : 
     558                 : /*
     559                 :  * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
     560                 :  * for its underlying LOCK.
     561                 :  *
     562                 :  * We use this just to avoid redundant calls of LockTagHashCode().
     563                 :  */
     564                 : static inline uint32
     565         6745001 : ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
     566                 : {
     567         6745001 :     uint32      lockhash = hashcode;
     568                 :     Datum       procptr;
     569                 : 
     570                 :     /*
     571                 :      * This must match proclock_hash()!
     572                 :      */
     573         6745001 :     procptr = PointerGetDatum(proclocktag->myProc);
     574         6745001 :     lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
     575                 : 
     576         6745001 :     return lockhash;
     577                 : }
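
Worked through: the partition number is the hashcode's low-order LOG2_NUM_LOCK_PARTITIONS bits (with the usual definition, 4 bits for NUM_LOCK_PARTITIONS = 16). Because the proc pointer is XORed in only after being shifted left by that many bits, those low-order bits are unchanged, so a PROCLOCK always hashes into the same partition as its underlying LOCK.
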
     578                 : 
     579                 : /*
     580                 :  * Given two lock modes, return whether they would conflict.
     581                 :  */
     582                 : bool
     583          132574 : DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
     584                 : {
     585          132574 :     LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
     586                 : 
     587          132574 :     if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
     588          132478 :         return true;
     589                 : 
     590              96 :     return false;
     591                 : }
     592                 : 
     593                 : /*
     594                 :  * LockHeldByMe -- test whether lock 'locktag' is held with mode 'lockmode'
     595                 :  *      by the current transaction
     596                 :  */
     597                 : bool
     598         5429526 : LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
     599                 : {
     600                 :     LOCALLOCKTAG localtag;
     601                 :     LOCALLOCK  *locallock;
     602                 : 
     603                 :     /*
     604                 :      * See if there is a LOCALLOCK entry for this lock and lockmode
     605                 :      */
     606         5429526 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     607         5429526 :     localtag.lock = *locktag;
     608         5429526 :     localtag.mode = lockmode;
     609                 : 
     610         5429526 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     611                 :                                           &localtag,
     612                 :                                           HASH_FIND, NULL);
     613                 : 
     614         5429526 :     return (locallock && locallock->nLocks > 0);
     615                 : }
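
A hypothetical caller-side sketch (SET_LOCKTAG_RELATION is the lock.h macro for building a relation locktag; the helper name is made up):

    /* Sketch: does the current transaction already hold AccessShareLock on relid? */
    static bool
    holds_access_share(Oid relid)
    {
        LOCKTAG     tag;

        SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
        return LockHeldByMe(&tag, AccessShareLock);
    }
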
     616                 : 
     617                 : #ifdef USE_ASSERT_CHECKING
     618                 : /*
     619                 :  * GetLockMethodLocalHash -- return the hash of local locks, for modules that
     620                 :  *      evaluate assertions based on all locks held.
     621                 :  */
     622                 : HTAB *
     623            5329 : GetLockMethodLocalHash(void)
     624                 : {
     625            5329 :     return LockMethodLocalHash;
     626                 : }
     627                 : #endif
     628                 : 
     629                 : /*
     630                 :  * LockHasWaiters -- look up 'locktag' and check if releasing this
     631                 :  *      lock would wake up other processes waiting for it.
     632                 :  */
     633                 : bool
     634 UBC           0 : LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
     635                 : {
     636               0 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
     637                 :     LockMethod  lockMethodTable;
     638                 :     LOCALLOCKTAG localtag;
     639                 :     LOCALLOCK  *locallock;
     640                 :     LOCK       *lock;
     641                 :     PROCLOCK   *proclock;
     642                 :     LWLock     *partitionLock;
     643               0 :     bool        hasWaiters = false;
     644                 : 
     645               0 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
     646               0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
     647               0 :     lockMethodTable = LockMethods[lockmethodid];
     648               0 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
     649               0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
     650                 : 
     651                 : #ifdef LOCK_DEBUG
     652                 :     if (LOCK_DEBUG_ENABLED(locktag))
     653                 :         elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
     654                 :              locktag->locktag_field1, locktag->locktag_field2,
     655                 :              lockMethodTable->lockModeNames[lockmode]);
     656                 : #endif
     657                 : 
     658                 :     /*
     659                 :      * Find the LOCALLOCK entry for this lock and lockmode
     660                 :      */
     661               0 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     662               0 :     localtag.lock = *locktag;
     663               0 :     localtag.mode = lockmode;
     664                 : 
     665               0 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     666                 :                                           &localtag,
     667                 :                                           HASH_FIND, NULL);
     668                 : 
     669                 :     /*
     670                 :      * let the caller print its own error message, too. Do not ereport(ERROR).
     671                 :      */
     672               0 :     if (!locallock || locallock->nLocks <= 0)
     673                 :     {
     674               0 :         elog(WARNING, "you don't own a lock of type %s",
     675                 :              lockMethodTable->lockModeNames[lockmode]);
     676               0 :         return false;
     677                 :     }
     678                 : 
     679                 :     /*
     680                 :      * Check the shared lock table.
     681                 :      */
     682               0 :     partitionLock = LockHashPartitionLock(locallock->hashcode);
     683                 : 
     684               0 :     LWLockAcquire(partitionLock, LW_SHARED);
     685                 : 
     686                 :     /*
     687                 :      * We don't need to re-find the lock or proclock, since we kept their
     688                 :      * addresses in the locallock table, and they couldn't have been removed
     689                 :      * while we were holding a lock on them.
     690                 :      */
     691               0 :     lock = locallock->lock;
     692                 :     LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
     693               0 :     proclock = locallock->proclock;
     694                 :     PROCLOCK_PRINT("LockHasWaiters: found", proclock);
     695                 : 
     696                 :     /*
     697                 :      * Double-check that we are actually holding a lock of the type we want to
     698                 :      * release.
     699                 :      */
     700               0 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
     701                 :     {
     702                 :         PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
     703               0 :         LWLockRelease(partitionLock);
     704               0 :         elog(WARNING, "you don't own a lock of type %s",
     705                 :              lockMethodTable->lockModeNames[lockmode]);
     706               0 :         RemoveLocalLock(locallock);
     707               0 :         return false;
     708                 :     }
     709                 : 
     710                 :     /*
     711                 :      * Do the checking.
     712                 :      */
     713               0 :     if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
     714               0 :         hasWaiters = true;
     715                 : 
     716               0 :     LWLockRelease(partitionLock);
     717                 : 
     718               0 :     return hasWaiters;
     719                 : }
     720                 : 
     721                 : /*
     722                 :  * LockAcquire -- Check for lock conflicts, sleep if conflict found,
     723                 :  *      set lock if/when no conflicts.
     724                 :  *
     725                 :  * Inputs:
     726                 :  *  locktag: unique identifier for the lockable object
     727                 :  *  lockmode: lock mode to acquire
     728                 :  *  sessionLock: if true, acquire lock for session not current transaction
     729                 :  *  dontWait: if true, don't wait to acquire lock
     730                 :  *
     731                 :  * Returns one of:
     732                 :  *      LOCKACQUIRE_NOT_AVAIL       lock not available, and dontWait=true
     733                 :  *      LOCKACQUIRE_OK              lock successfully acquired
     734                 :  *      LOCKACQUIRE_ALREADY_HELD    incremented count for lock already held
     735                 :  *      LOCKACQUIRE_ALREADY_CLEAR   incremented count for lock already clear
     736                 :  *
     737                 :  * In the normal case where dontWait=false and the caller doesn't need to
     738                 :  * distinguish a freshly acquired lock from one already taken earlier in
     739                 :  * this same transaction, there is no need to examine the return value.
     740                 :  *
     741                 :  * Side Effects: The lock is acquired and recorded in lock tables.
     742                 :  *
     743                 :  * NOTE: if we wait for the lock, there is no way to abort the wait
     744                 :  * short of aborting the transaction.
     745                 :  */
     746                 : LockAcquireResult
     747 CBC      945773 : LockAcquire(const LOCKTAG *locktag,
     748                 :             LOCKMODE lockmode,
     749                 :             bool sessionLock,
     750                 :             bool dontWait)
     751                 : {
     752          945773 :     return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
     753                 :                                true, NULL);
     754                 : }
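
A hedged sketch of calling this interface directly (ordinary callers go through lmgr.c, e.g. LockRelationOid(), as the file header says; relid is a hypothetical variable):

    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
    if (LockAcquire(&tag, ShareLock, false, true) == LOCKACQUIRE_NOT_AVAIL)
        elog(ERROR, "could not obtain ShareLock on relation %u", relid);
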
     755                 : 
     756                 : /*
     757                 :  * LockAcquireExtended - allows us to specify additional options
     758                 :  *
     759                 :  * reportMemoryError specifies whether a lock request that fills the lock
     760                 :  * table should generate an ERROR or not.  Passing "false" allows the caller
     761                 :  * to attempt to recover from lock-table-full situations, perhaps by forcibly
     762                 :  * canceling other lock holders and then retrying.  Note, however, that the
     763                 :  * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
     764                 :  * in combination with dontWait = true, as the cause of failure couldn't be
     765                 :  * distinguished.
     766                 :  *
     767                 :  * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
     768                 :  * table entry if a lock is successfully acquired, or NULL if not.
     769                 :  */
     770                 : LockAcquireResult
     771        28610314 : LockAcquireExtended(const LOCKTAG *locktag,
     772                 :                     LOCKMODE lockmode,
     773                 :                     bool sessionLock,
     774                 :                     bool dontWait,
     775                 :                     bool reportMemoryError,
     776                 :                     LOCALLOCK **locallockp)
     777                 : {
     778        28610314 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
     779                 :     LockMethod  lockMethodTable;
     780                 :     LOCALLOCKTAG localtag;
     781                 :     LOCALLOCK  *locallock;
     782                 :     LOCK       *lock;
     783                 :     PROCLOCK   *proclock;
     784                 :     bool        found;
     785                 :     ResourceOwner owner;
     786                 :     uint32      hashcode;
     787                 :     LWLock     *partitionLock;
     788                 :     bool        found_conflict;
     789        28610314 :     bool        log_lock = false;
     790                 : 
     791        28610314 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
     792 UBC           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
     793 CBC    28610314 :     lockMethodTable = LockMethods[lockmethodid];
     794        28610314 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
     795 UBC           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
     796                 : 
     797 CBC    28610314 :     if (RecoveryInProgress() && !InRecovery &&
     798          173708 :         (locktag->locktag_type == LOCKTAG_OBJECT ||
     799          173708 :          locktag->locktag_type == LOCKTAG_RELATION) &&
     800                 :         lockmode > RowExclusiveLock)
     801 UBC           0 :         ereport(ERROR,
     802                 :                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
     803                 :                  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
     804                 :                         lockMethodTable->lockModeNames[lockmode]),
     805                 :                  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
     806                 : 
     807                 : #ifdef LOCK_DEBUG
     808                 :     if (LOCK_DEBUG_ENABLED(locktag))
     809                 :         elog(LOG, "LockAcquire: lock [%u,%u] %s",
     810                 :              locktag->locktag_field1, locktag->locktag_field2,
     811                 :              lockMethodTable->lockModeNames[lockmode]);
     812                 : #endif
     813                 : 
     814                 :     /* Identify owner for lock */
     815 CBC    28610314 :     if (sessionLock)
     816           58222 :         owner = NULL;
     817                 :     else
     818        28552092 :         owner = CurrentResourceOwner;
     819                 : 
     820                 :     /*
     821                 :      * Find or create a LOCALLOCK entry for this lock and lockmode
     822                 :      */
     823        28610314 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
     824        28610314 :     localtag.lock = *locktag;
     825        28610314 :     localtag.mode = lockmode;
     826                 : 
     827        28610314 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
     828                 :                                           &localtag,
     829                 :                                           HASH_ENTER, &found);
     830                 : 
     831                 :     /*
     832                 :      * if it's a new locallock object, initialize it
     833                 :      */
     834        28610314 :     if (!found)
     835                 :     {
     836        26137790 :         locallock->lock = NULL;
     837        26137790 :         locallock->proclock = NULL;
     838        26137790 :         locallock->hashcode = LockTagHashCode(&(localtag.lock));
     839        26137790 :         locallock->nLocks = 0;
     840        26137790 :         locallock->holdsStrongLockCount = false;
     841        26137790 :         locallock->lockCleared = false;
     842        26137790 :         locallock->numLockOwners = 0;
     843        26137790 :         locallock->maxLockOwners = 8;
     844        26137790 :         locallock->lockOwners = NULL;    /* in case next line fails */
     845        26137790 :         locallock->lockOwners = (LOCALLOCKOWNER *)
     846        26137790 :             MemoryContextAlloc(TopMemoryContext,
     847        26137790 :                                locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
     848                 :     }
     849                 :     else
     850                 :     {
     851                 :         /* Make sure there will be room to remember the lock */
     852         2472524 :         if (locallock->numLockOwners >= locallock->maxLockOwners)
     853                 :         {
     854              19 :             int         newsize = locallock->maxLockOwners * 2;
     855                 : 
     856              19 :             locallock->lockOwners = (LOCALLOCKOWNER *)
     857              19 :                 repalloc(locallock->lockOwners,
     858                 :                          newsize * sizeof(LOCALLOCKOWNER));
     859              19 :             locallock->maxLockOwners = newsize;
     860                 :         }
     861                 :     }
     862        28610314 :     hashcode = locallock->hashcode;
     863                 : 
     864        28610314 :     if (locallockp)
     865        27664541 :         *locallockp = locallock;
     866                 : 
     867                 :     /*
     868                 :      * If we already hold the lock, we can just increase the count locally.
     869                 :      *
     870                 :      * If lockCleared is already set, caller need not worry about absorbing
     871                 :      * sinval messages related to the lock's object.
     872                 :      */
     873        28610314 :     if (locallock->nLocks > 0)
     874                 :     {
     875         2472524 :         GrantLockLocal(locallock, owner);
     876         2472524 :         if (locallock->lockCleared)
     877         2371290 :             return LOCKACQUIRE_ALREADY_CLEAR;
     878                 :         else
     879          101234 :             return LOCKACQUIRE_ALREADY_HELD;
     880                 :     }
     881                 : 
     882                 :     /*
     883                 :      * We don't acquire any other heavyweight lock while holding the relation
     884                 :      * extension lock.  We do allow to acquire the same relation extension
     885                 :      * lock more than once but that case won't reach here.
     886                 :      */
     887        26137790 :     Assert(!IsRelationExtensionLockHeld);
     888                 : 
     889                 :     /*
     890                 :      * We don't acquire any other heavyweight lock while holding the page lock
     891                 :      * except for relation extension.
     892                 :      */
     893        26137790 :     Assert(!IsPageLockHeld ||
     894                 :            (locktag->locktag_type == LOCKTAG_RELATION_EXTEND));
     895                 : 
     896                 :     /*
     897                 :      * Prepare to emit a WAL record if acquisition of this lock needs to be
     898                 :      * replayed in a standby server.
     899                 :      *
     900                 :      * Here we prepare to log; after lock is acquired we'll issue log record.
     901                 :      * This arrangement simplifies error recovery in case the preparation step
     902                 :      * fails.
     903                 :      *
     904                 :      * Only AccessExclusiveLocks can conflict with lock types that read-only
     905                 :      * transactions can acquire in a standby server. Make sure this definition
     906                 :      * matches the one in GetRunningTransactionLocks().
     907                 :      */
     908        26137790 :     if (lockmode >= AccessExclusiveLock &&
     909          299428 :         locktag->locktag_type == LOCKTAG_RELATION &&
     910          240460 :         !RecoveryInProgress() &&
     911          221317 :         XLogStandbyInfoActive())
     912                 :     {
     913          197730 :         LogAccessExclusiveLockPrepare();
     914          197730 :         log_lock = true;
     915                 :     }
     916                 : 
     917                 :     /*
     918                 :      * Attempt to take lock via fast path, if eligible.  But if we remember
     919                 :      * having filled up the fast path array, we don't attempt to make any
     920                 :      * further use of it until we release some locks.  It's possible that some
     921                 :      * other backend has transferred some of those locks to the shared hash
     922                 :      * table, leaving space free, but it's not worth acquiring the LWLock just
     923                 :      * to check.  It's also possible that we're acquiring a second or third
     924                 :      * lock type on a relation we have already locked using the fast-path, but
     925                 :      * for now we don't worry about that case either.
     926                 :      */
     927        26137790 :     if (EligibleForRelationFastPath(locktag, lockmode) &&
     928        23674570 :         FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
     929                 :     {
     930        23455377 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
     931                 :         bool        acquired;
     932                 : 
     933                 :         /*
     934                 :          * LWLockAcquire acts as a memory sequencing point, so it's safe to
     935                 :          * assume that any strong locker whose increment to
      936                 :          * FastPathStrongRelationLocks->count becomes visible after we test
     937                 :          * it has yet to begin to transfer fast-path locks.
     938                 :          */
     939        23455377 :         LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
     940        23455377 :         if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
     941         1358459 :             acquired = false;
     942                 :         else
     943        22096918 :             acquired = FastPathGrantRelationLock(locktag->locktag_field2,
     944                 :                                                  lockmode);
     945        23455377 :         LWLockRelease(&MyProc->fpInfoLock);
     946        23455377 :         if (acquired)
     947                 :         {
     948                 :             /*
     949                 :              * The locallock might contain stale pointers to some old shared
     950                 :              * objects; we MUST reset these to null before considering the
     951                 :              * lock to be acquired via fast-path.
     952                 :              */
     953        22096918 :             locallock->lock = NULL;
     954        22096918 :             locallock->proclock = NULL;
     955        22096918 :             GrantLockLocal(locallock, owner);
     956        22096918 :             return LOCKACQUIRE_OK;
     957                 :         }
     958                 :     }
     959                 : 
     960                 :     /*
     961                 :      * If this lock could potentially have been taken via the fast-path by
     962                 :      * some other backend, we must (temporarily) disable further use of the
     963                 :      * fast-path for this lock tag, and migrate any locks already taken via
     964                 :      * this method to the main lock table.
     965                 :      */
     966         4040872 :     if (ConflictsWithRelationFastPath(locktag, lockmode))
     967                 :     {
     968          271615 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
     969                 : 
     970          271615 :         BeginStrongLockAcquire(locallock, fasthashcode);
     971          271615 :         if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
     972                 :                                            hashcode))
     973                 :         {
     974 UBC           0 :             AbortStrongLockAcquire();
     975               0 :             if (locallock->nLocks == 0)
     976               0 :                 RemoveLocalLock(locallock);
     977               0 :             if (locallockp)
     978               0 :                 *locallockp = NULL;
     979               0 :             if (reportMemoryError)
     980               0 :                 ereport(ERROR,
     981                 :                         (errcode(ERRCODE_OUT_OF_MEMORY),
     982                 :                          errmsg("out of shared memory"),
     983                 :                          errhint("You might need to increase max_locks_per_transaction.")));
     984                 :             else
     985               0 :                 return LOCKACQUIRE_NOT_AVAIL;
     986                 :         }
     987                 :     }
     988                 : 
     989                 :     /*
     990                 :      * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
     991                 :      * take it via the fast-path, either, so we've got to mess with the shared
     992                 :      * lock table.
     993                 :      */
     994 CBC     4040872 :     partitionLock = LockHashPartitionLock(hashcode);
     995                 : 
     996         4040872 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
     997                 : 
     998                 :     /*
     999                 :      * Find or create lock and proclock entries with this tag
    1000                 :      *
    1001                 :      * Note: if the locallock object already existed, it might have a pointer
    1002                 :      * to the lock already ... but we should not assume that that pointer is
    1003                 :      * valid, since a lock object with zero hold and request counts can go
    1004                 :      * away anytime.  So we have to use SetupLockInTable() to recompute the
    1005                 :      * lock and proclock pointers, even if they're already set.
    1006                 :      */
    1007         4040872 :     proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
    1008                 :                                 hashcode, lockmode);
    1009         4040872 :     if (!proclock)
    1010                 :     {
    1011 UBC           0 :         AbortStrongLockAcquire();
    1012               0 :         LWLockRelease(partitionLock);
    1013               0 :         if (locallock->nLocks == 0)
    1014               0 :             RemoveLocalLock(locallock);
    1015               0 :         if (locallockp)
    1016               0 :             *locallockp = NULL;
    1017               0 :         if (reportMemoryError)
    1018               0 :             ereport(ERROR,
    1019                 :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    1020                 :                      errmsg("out of shared memory"),
    1021                 :                      errhint("You might need to increase max_locks_per_transaction.")));
    1022                 :         else
    1023               0 :             return LOCKACQUIRE_NOT_AVAIL;
    1024                 :     }
    1025 CBC     4040872 :     locallock->proclock = proclock;
    1026         4040872 :     lock = proclock->tag.myLock;
    1027         4040872 :     locallock->lock = lock;
    1028                 : 
    1029                 :     /*
    1030                 :      * If lock requested conflicts with locks requested by waiters, must join
    1031                 :      * wait queue.  Otherwise, check for conflict with already-held locks.
     1032                 :      * (That's checked last because it's the most complex check.)
    1033                 :      */
    1034         4040872 :     if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
    1035              26 :         found_conflict = true;
    1036                 :     else
    1037         4040846 :         found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
    1038                 :                                             lock, proclock);
    1039                 : 
    1040         4040872 :     if (!found_conflict)
    1041                 :     {
    1042                 :         /* No conflict with held or previously requested locks */
    1043         4039195 :         GrantLock(lock, proclock, lockmode);
    1044         4039195 :         GrantLockLocal(locallock, owner);
    1045                 :     }
    1046                 :     else
    1047                 :     {
    1048                 :         /*
    1049                 :          * We can't acquire the lock immediately.  If caller specified no
    1050                 :          * blocking, remove useless table entries and return
    1051                 :          * LOCKACQUIRE_NOT_AVAIL without waiting.
    1052                 :          */
    1053            1677 :         if (dontWait)
    1054                 :         {
    1055             647 :             AbortStrongLockAcquire();
    1056             647 :             if (proclock->holdMask == 0)
    1057                 :             {
    1058                 :                 uint32      proclock_hashcode;
    1059                 : 
    1060             445 :                 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
    1061 GNC         445 :                 dlist_delete(&proclock->lockLink);
    1062             445 :                 dlist_delete(&proclock->procLink);
    1063 CBC         445 :                 if (!hash_search_with_hash_value(LockMethodProcLockHash,
    1064 GNC         445 :                                                  &(proclock->tag),
    1065                 :                                                  proclock_hashcode,
    1066                 :                                                  HASH_REMOVE,
    1067                 :                                                  NULL))
    1068 UBC           0 :                     elog(PANIC, "proclock table corrupted");
    1069                 :             }
    1070                 :             else
    1071                 :                 PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
    1072 CBC         647 :             lock->nRequested--;
    1073             647 :             lock->requested[lockmode]--;
    1074                 :             LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
    1075             647 :             Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
    1076             647 :             Assert(lock->nGranted <= lock->nRequested);
    1077             647 :             LWLockRelease(partitionLock);
    1078             647 :             if (locallock->nLocks == 0)
    1079             647 :                 RemoveLocalLock(locallock);
    1080             647 :             if (locallockp)
    1081             220 :                 *locallockp = NULL;
    1082             647 :             return LOCKACQUIRE_NOT_AVAIL;
    1083                 :         }
    1084                 : 
    1085                 :         /*
    1086                 :          * Set bitmask of locks this process already holds on this object.
    1087                 :          */
    1088            1030 :         MyProc->heldLocks = proclock->holdMask;
    1089                 : 
    1090                 :         /*
    1091                 :          * Sleep till someone wakes me up.
    1092                 :          */
    1093                 : 
    1094                 :         TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
    1095                 :                                          locktag->locktag_field2,
    1096                 :                                          locktag->locktag_field3,
    1097                 :                                          locktag->locktag_field4,
    1098                 :                                          locktag->locktag_type,
    1099                 :                                          lockmode);
    1100                 : 
    1101            1030 :         WaitOnLock(locallock, owner);
    1102                 : 
    1103                 :         TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
    1104                 :                                         locktag->locktag_field2,
    1105                 :                                         locktag->locktag_field3,
    1106                 :                                         locktag->locktag_field4,
    1107                 :                                         locktag->locktag_type,
    1108                 :                                         lockmode);
    1109                 : 
    1110                 :         /*
    1111                 :          * NOTE: do not do any material change of state between here and
    1112                 :          * return.  All required changes in locktable state must have been
    1113                 :          * done when the lock was granted to us --- see notes in WaitOnLock.
    1114                 :          */
    1115                 : 
    1116                 :         /*
     1117                 :          * Check the proclock entry status, in case something in the IPC
    1118                 :          * communication doesn't work correctly.
    1119                 :          */
    1120             983 :         if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    1121                 :         {
    1122 UBC           0 :             AbortStrongLockAcquire();
    1123                 :             PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
    1124                 :             LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
    1125                 :             /* Should we retry ? */
    1126               0 :             LWLockRelease(partitionLock);
    1127               0 :             elog(ERROR, "LockAcquire failed");
    1128                 :         }
    1129                 :         PROCLOCK_PRINT("LockAcquire: granted", proclock);
    1130                 :         LOCK_PRINT("LockAcquire: granted", lock, lockmode);
    1131                 :     }
    1132                 : 
    1133                 :     /*
    1134                 :      * Lock state is fully up-to-date now; if we error out after this, no
    1135                 :      * special error cleanup is required.
    1136                 :      */
    1137 CBC     4040178 :     FinishStrongLockAcquire();
    1138                 : 
    1139         4040178 :     LWLockRelease(partitionLock);
    1140                 : 
    1141                 :     /*
    1142                 :      * Emit a WAL record if acquisition of this lock needs to be replayed in a
    1143                 :      * standby server.
    1144                 :      */
    1145         4040178 :     if (log_lock)
    1146                 :     {
    1147                 :         /*
    1148                 :          * Decode the locktag back to the original values, to avoid sending
    1149                 :          * lots of empty bytes with every message.  See lock.h to check how a
    1150                 :          * locktag is defined for LOCKTAG_RELATION
    1151                 :          */
    1152          197518 :         LogAccessExclusiveLock(locktag->locktag_field1,
    1153          197518 :                                locktag->locktag_field2);
    1154                 :     }
    1155                 : 
    1156         4040178 :     return LOCKACQUIRE_OK;
    1157                 : }
    1158                 : 
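
The fast-path logic commented in LockAcquire above reduces to a simple gate: a weak relation lock may be recorded in the backend's small fast-path array only while no strong locker has bumped the shared counter for the lock tag's hash partition; otherwise the request falls through to the shared hash table. A minimal, single-threaded C sketch of that gate follows (illustration only, not PostgreSQL code; names such as try_fast_path and strong_count are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FP_SLOTS 16
#define STRONG_PARTITIONS 1024

static uint32_t fp_slot[FP_SLOTS];      /* relation ids locked via fast path */
static int      fp_used;
static int      strong_count[STRONG_PARTITIONS]; /* stand-in for the shared counters */

static uint32_t
partition(uint32_t hashcode)
{
    return hashcode % STRONG_PARTITIONS;
}

/* Returns true if the lock could be taken via the fast path. */
static bool
try_fast_path(uint32_t relid, uint32_t hashcode)
{
    if (fp_used >= FP_SLOTS)
        return false;           /* array full: don't even try */
    if (strong_count[partition(hashcode)] != 0)
        return false;           /* a strong locker exists: use the main table */
    fp_slot[fp_used++] = relid;
    return true;
}

/*
 * A strong locker bumps the counter first and then migrates any matching
 * fast-path locks into the main table (the transfer itself is elided here).
 */
static void
begin_strong_lock(uint32_t hashcode)
{
    strong_count[partition(hashcode)]++;
}

int
main(void)
{
    uint32_t relid = 16384;
    uint32_t hashcode = relid * 2654435761u;

    printf("fast path taken: %d\n", try_fast_path(relid, hashcode)); /* 1 */
    begin_strong_lock(hashcode);
    printf("fast path taken: %d\n", try_fast_path(relid, hashcode)); /* 0 */
    return 0;
}
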
    1159                 : /*
    1160                 :  * Find or create LOCK and PROCLOCK objects as needed for a new lock
    1161                 :  * request.
    1162                 :  *
    1163                 :  * Returns the PROCLOCK object, or NULL if we failed to create the objects
    1164                 :  * for lack of shared memory.
    1165                 :  *
    1166                 :  * The appropriate partition lock must be held at entry, and will be
    1167                 :  * held at exit.
    1168                 :  */
    1169                 : static PROCLOCK *
    1170         4042461 : SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
    1171                 :                  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
    1172                 : {
    1173                 :     LOCK       *lock;
    1174                 :     PROCLOCK   *proclock;
    1175                 :     PROCLOCKTAG proclocktag;
    1176                 :     uint32      proclock_hashcode;
    1177                 :     bool        found;
    1178                 : 
    1179                 :     /*
    1180                 :      * Find or create a lock with this tag.
    1181                 :      */
    1182         4042461 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    1183                 :                                                 locktag,
    1184                 :                                                 hashcode,
    1185                 :                                                 HASH_ENTER_NULL,
    1186                 :                                                 &found);
    1187         4042461 :     if (!lock)
    1188 UBC           0 :         return NULL;
    1189                 : 
    1190                 :     /*
    1191                 :      * if it's a new lock object, initialize it
    1192                 :      */
    1193 CBC     4042461 :     if (!found)
    1194                 :     {
    1195         2677474 :         lock->grantMask = 0;
    1196         2677474 :         lock->waitMask = 0;
    1197 GNC     2677474 :         dlist_init(&lock->procLocks);
    1198         2677474 :         dclist_init(&lock->waitProcs);
    1199 CBC     2677474 :         lock->nRequested = 0;
    1200         2677474 :         lock->nGranted = 0;
    1201        16064844 :         MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
    1202         2677474 :         MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
    1203                 :         LOCK_PRINT("LockAcquire: new", lock, lockmode);
    1204                 :     }
    1205                 :     else
    1206                 :     {
    1207                 :         LOCK_PRINT("LockAcquire: found", lock, lockmode);
    1208         1364987 :         Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
    1209         1364987 :         Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
    1210         1364987 :         Assert(lock->nGranted <= lock->nRequested);
    1211                 :     }
    1212                 : 
    1213                 :     /*
    1214                 :      * Create the hash key for the proclock table.
    1215                 :      */
    1216         4042461 :     proclocktag.myLock = lock;
    1217         4042461 :     proclocktag.myProc = proc;
    1218                 : 
    1219         4042461 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    1220                 : 
    1221                 :     /*
    1222                 :      * Find or create a proclock entry with this tag
    1223                 :      */
    1224         4042461 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    1225                 :                                                         &proclocktag,
    1226                 :                                                         proclock_hashcode,
    1227                 :                                                         HASH_ENTER_NULL,
    1228                 :                                                         &found);
    1229         4042461 :     if (!proclock)
    1230                 :     {
    1231                 :         /* Oops, not enough shmem for the proclock */
    1232 UBC           0 :         if (lock->nRequested == 0)
    1233                 :         {
    1234                 :             /*
    1235                 :              * There are no other requestors of this lock, so garbage-collect
    1236                 :              * the lock object.  We *must* do this to avoid a permanent leak
    1237                 :              * of shared memory, because there won't be anything to cause
    1238                 :              * anyone to release the lock object later.
    1239                 :              */
    1240 UNC           0 :             Assert(dlist_is_empty(&(lock->procLocks)));
    1241 UBC           0 :             if (!hash_search_with_hash_value(LockMethodLockHash,
    1242 UNC           0 :                                              &(lock->tag),
    1243                 :                                              hashcode,
    1244                 :                                              HASH_REMOVE,
    1245                 :                                              NULL))
    1246 UBC           0 :                 elog(PANIC, "lock table corrupted");
    1247                 :         }
    1248               0 :         return NULL;
    1249                 :     }
    1250                 : 
    1251                 :     /*
    1252                 :      * If new, initialize the new entry
    1253                 :      */
    1254 CBC     4042461 :     if (!found)
    1255                 :     {
    1256         2700380 :         uint32      partition = LockHashPartition(hashcode);
    1257                 : 
    1258                 :         /*
    1259                 :          * It might seem unsafe to access proclock->groupLeader without a
    1260                 :          * lock, but it's not really.  Either we are initializing a proclock
    1261                 :          * on our own behalf, in which case our group leader isn't changing
    1262                 :          * because the group leader for a process can only ever be changed by
    1263                 :          * the process itself; or else we are transferring a fast-path lock to
     1264                 :          * the main lock table, in which case that process can't change its
    1265                 :          * lock group leader without first releasing all of its locks (and in
    1266                 :          * particular the one we are currently transferring).
    1267                 :          */
    1268         5400760 :         proclock->groupLeader = proc->lockGroupLeader != NULL ?
    1269         2700380 :             proc->lockGroupLeader : proc;
    1270         2700380 :         proclock->holdMask = 0;
    1271         2700380 :         proclock->releaseMask = 0;
    1272                 :         /* Add proclock to appropriate lists */
    1273 GNC     2700380 :         dlist_push_tail(&lock->procLocks, &proclock->lockLink);
    1274         2700380 :         dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
    1275                 :         PROCLOCK_PRINT("LockAcquire: new", proclock);
    1276                 :     }
    1277                 :     else
    1278                 :     {
    1279 ECB             :         PROCLOCK_PRINT("LockAcquire: found", proclock);
    1280 GIC     1342081 :         Assert((proclock->holdMask & ~lock->grantMask) == 0);
    1281                 : 
    1282                 : #ifdef CHECK_DEADLOCK_RISK
    1283                 : 
    1284                 :         /*
    1285                 :          * Issue warning if we already hold a lower-level lock on this object
    1286                 :          * and do not hold a lock of the requested level or higher. This
    1287                 :          * indicates a deadlock-prone coding practice (eg, we'd have a
    1288                 :          * deadlock if another backend were following the same code path at
    1289                 :          * about the same time).
    1290                 :          *
    1291                 :          * This is not enabled by default, because it may generate log entries
    1292                 :          * about user-level coding practices that are in fact safe in context.
    1293                 :          * It can be enabled to help find system-level problems.
    1294                 :          *
    1295                 :          * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
    1296                 :          * better to use a table.  For now, though, this works.
    1297                 :          */
    1298                 :         {
    1299                 :             int         i;
    1300                 : 
    1301                 :             for (i = lockMethodTable->numLockModes; i > 0; i--)
    1302                 :             {
    1303                 :                 if (proclock->holdMask & LOCKBIT_ON(i))
    1304                 :                 {
    1305                 :                     if (i >= (int) lockmode)
    1306                 :                         break;  /* safe: we have a lock >= req level */
    1307                 :                     elog(LOG, "deadlock risk: raising lock level"
    1308                 :                          " from %s to %s on object %u/%u/%u",
    1309                 :                          lockMethodTable->lockModeNames[i],
    1310                 :                          lockMethodTable->lockModeNames[lockmode],
    1311                 :                          lock->tag.locktag_field1, lock->tag.locktag_field2,
    1312                 :                          lock->tag.locktag_field3);
    1313                 :                     break;
    1314                 :                 }
    1315                 :             }
    1316                 :         }
    1317                 : #endif                          /* CHECK_DEADLOCK_RISK */
    1318                 :     }
    1319                 : 
    1320                 :     /*
    1321                 :      * lock->nRequested and lock->requested[] count the total number of
    1322                 :      * requests, whether granted or waiting, so increment those immediately.
    1323                 :      * The other counts don't increment till we get the lock.
    1324 ECB             :      */
    1325 CBC     4042461 :     lock->nRequested++;
    1326         4042461 :     lock->requested[lockmode]++;
    1327 GIC     4042461 :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    1328                 : 
    1329                 :     /*
    1330                 :      * We shouldn't already hold the desired lock; else locallock table is
    1331                 :      * broken.
    1332 ECB             :      */
    1333 GBC     4042461 :     if (proclock->holdMask & LOCKBIT_ON(lockmode))
    1334 UIC           0 :         elog(ERROR, "lock %s on object %u/%u/%u is already held",
    1335                 :              lockMethodTable->lockModeNames[lockmode],
    1336                 :              lock->tag.locktag_field1, lock->tag.locktag_field2,
    1337                 :              lock->tag.locktag_field3);
    1338 ECB             : 
    1339 GIC     4042461 :     return proclock;
    1340                 : }
    1341                 : 
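
SetupLockInTable's out-of-memory handling above hinges on allocation order: the LOCK object is found or created first, then the PROCLOCK; if the PROCLOCK cannot be allocated and nobody has a request pending on the LOCK, the LOCK must be deleted again or it would leak in shared memory. A toy C sketch of that rollback rule (illustration only; malloc stands in for the shared hash tables, and all names are invented):

#include <stdlib.h>

typedef struct ToyLock
{
    int nRequested;             /* pending requests, granted or waiting */
} ToyLock;

typedef struct ToyProcLock
{
    ToyLock *lock;
} ToyProcLock;

/*
 * Find-or-create the lock, then the per-process entry.  On failure of the
 * second step, garbage-collect the first if nobody else needs it.
 */
static ToyProcLock *
setup_lock(ToyLock **lockp)
{
    if (*lockp == NULL)
    {
        *lockp = calloc(1, sizeof(ToyLock));
        if (*lockp == NULL)
            return NULL;
    }

    ToyProcLock *proclock = calloc(1, sizeof(ToyProcLock));
    if (proclock == NULL)
    {
        /* No other requestors: delete the lock again to avoid a leak. */
        if ((*lockp)->nRequested == 0)
        {
            free(*lockp);
            *lockp = NULL;
        }
        return NULL;
    }

    proclock->lock = *lockp;
    (*lockp)->nRequested++;     /* count the request immediately */
    return proclock;
}

int
main(void)
{
    ToyLock     *lock = NULL;
    ToyProcLock *proclock = setup_lock(&lock);

    free(proclock);
    free(lock);
    return 0;
}
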
    1342                 : /*
    1343                 :  * Check and set/reset the flag that we hold the relation extension/page lock.
    1344                 :  *
     1345                 :  * It is the caller's responsibility to call this function after
     1346                 :  * acquiring or releasing the relation extension/page lock.
    1347                 :  *
    1348                 :  * Pass acquired as true if lock is acquired, false otherwise.
    1349                 :  */
    1350 ECB             : static inline void
    1351 GIC    52620064 : CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
    1352                 : {
    1353 ECB             : #ifdef USE_ASSERT_CHECKING
    1354 CBC    52620064 :     if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
    1355          651318 :         IsRelationExtensionLockHeld = acquired;
    1356        51968746 :     else if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_PAGE)
    1357 GIC          80 :         IsPageLockHeld = acquired;
    1358                 : 
    1359 ECB             : #endif
    1360 GIC    52620064 : }
    1361                 : 
    1362                 : /*
    1363                 :  * Subroutine to free a locallock entry
    1364                 :  */
    1365 ECB             : static void
    1366 GIC    26137790 : RemoveLocalLock(LOCALLOCK *locallock)
    1367                 : {
    1368                 :     int         i;
    1369 ECB             : 
    1370 GIC    26196832 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    1371 ECB             :     {
    1372 CBC       59042 :         if (locallock->lockOwners[i].owner != NULL)
    1373 GIC       59008 :             ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
    1374 ECB             :     }
    1375 CBC    26137790 :     locallock->numLockOwners = 0;
    1376        26137790 :     if (locallock->lockOwners != NULL)
    1377        26137790 :         pfree(locallock->lockOwners);
    1378 GIC    26137790 :     locallock->lockOwners = NULL;
    1379 ECB             : 
    1380 GIC    26137790 :     if (locallock->holdsStrongLockCount)
    1381                 :     {
    1382                 :         uint32      fasthashcode;
    1383 ECB             : 
    1384 GIC      271338 :         fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    1385 ECB             : 
    1386 CBC      271338 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1387          271338 :         Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    1388          271338 :         FastPathStrongRelationLocks->count[fasthashcode]--;
    1389          271338 :         locallock->holdsStrongLockCount = false;
    1390 GIC      271338 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1391                 :     }
    1392 ECB             : 
    1393 CBC    26137790 :     if (!hash_search(LockMethodLocalHash,
    1394 GNC    26137790 :                      &(locallock->tag),
    1395 EUB             :                      HASH_REMOVE, NULL))
    1396 UIC           0 :         elog(WARNING, "locallock table corrupted");
    1397                 : 
    1398                 :     /*
    1399                 :      * Indicate that the lock is released for certain types of locks
    1400 ECB             :      */
    1401 CBC    26137790 :     CheckAndSetLockHeld(locallock, false);
    1402 GIC    26137790 : }
    1403                 : 
    1404                 : /*
    1405                 :  * LockCheckConflicts -- test whether requested lock conflicts
    1406                 :  *      with those already granted
    1407                 :  *
    1408                 :  * Returns true if conflict, false if no conflict.
    1409                 :  *
    1410                 :  * NOTES:
    1411                 :  *      Here's what makes this complicated: one process's locks don't
    1412                 :  * conflict with one another, no matter what purpose they are held for
    1413                 :  * (eg, session and transaction locks do not conflict).  Nor do the locks
    1414                 :  * of one process in a lock group conflict with those of another process in
    1415                 :  * the same group.  So, we must subtract off these locks when determining
    1416                 :  * whether the requested new lock conflicts with those already held.
    1417                 :  */
    1418 ECB             : bool
    1419 GIC     4041873 : LockCheckConflicts(LockMethod lockMethodTable,
    1420                 :                    LOCKMODE lockmode,
    1421                 :                    LOCK *lock,
    1422                 :                    PROCLOCK *proclock)
    1423 ECB             : {
    1424 GIC     4041873 :     int         numLockModes = lockMethodTable->numLockModes;
    1425 ECB             :     LOCKMASK    myLocks;
    1426 GIC     4041873 :     int         conflictMask = lockMethodTable->conflictTab[lockmode];
    1427 ECB             :     int         conflictsRemaining[MAX_LOCKMODES];
    1428 GIC     4041873 :     int         totalConflictsRemaining = 0;
    1429                 :     dlist_iter  proclock_iter;
    1430                 :     int         i;
    1431                 : 
    1432                 :     /*
    1433                 :      * first check for global conflicts: If no locks conflict with my request,
    1434                 :      * then I get the lock.
    1435                 :      *
    1436                 :      * Checking for conflict: lock->grantMask represents the types of
    1437                 :      * currently held locks.  conflictTable[lockmode] has a bit set for each
    1438                 :      * type of lock that conflicts with request.   Bitwise compare tells if
    1439 ECB             :      * there is a conflict.
    1440                 :      */
    1441 GIC     4041873 :     if (!(conflictMask & lock->grantMask))
    1442 ECB             :     {
    1443                 :         PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
    1444 GIC     3783227 :         return false;
    1445                 :     }
    1446                 : 
    1447                 :     /*
    1448                 :      * Rats.  Something conflicts.  But it could still be my own lock, or a
    1449                 :      * lock held by another member of my locking group.  First, figure out how
    1450 ECB             :      * many conflicts remain after subtracting out any locks I hold myself.
    1451                 :      */
    1452 GIC      258646 :     myLocks = proclock->holdMask;
    1453 CBC     2327814 :     for (i = 1; i <= numLockModes; i++)
    1454                 :     {
    1455         2069168 :         if ((conflictMask & LOCKBIT_ON(i)) == 0)
    1456 ECB             :         {
    1457 GIC     1069300 :             conflictsRemaining[i] = 0;
    1458 CBC     1069300 :             continue;
    1459 ECB             :         }
    1460 CBC      999868 :         conflictsRemaining[i] = lock->granted[i];
    1461          999868 :         if (myLocks & LOCKBIT_ON(i))
    1462 GIC      260541 :             --conflictsRemaining[i];
    1463          999868 :         totalConflictsRemaining += conflictsRemaining[i];
    1464                 :     }
    1465 ECB             : 
    1466                 :     /* If no conflicts remain, we get the lock. */
    1467 GIC      258646 :     if (totalConflictsRemaining == 0)
    1468 ECB             :     {
    1469                 :         PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
    1470 GIC      256523 :         return false;
    1471                 :     }
    1472 ECB             : 
    1473                 :     /* If no group locking, it's definitely a conflict. */
    1474 CBC        2123 :     if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
    1475                 :     {
    1476 GIC        1649 :         Assert(proclock->tag.myProc == MyProc);
    1477 ECB             :         PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
    1478                 :                        proclock);
    1479 GIC        1649 :         return true;
    1480                 :     }
    1481                 : 
    1482                 :     /*
    1483                 :      * The relation extension or page lock conflict even between the group
     1484                 :      * Relation extension and page locks conflict even between members of
     1485 ECB             :      * the same lock group.
    1486 GIC         474 :     if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
    1487             466 :         (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
    1488                 :     {
    1489 ECB             :         PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
    1490                 :                        proclock);
    1491 GIC           8 :         return true;
    1492                 :     }
    1493                 : 
    1494                 :     /*
    1495                 :      * Locks held in conflicting modes by members of our own lock group are
    1496                 :      * not real conflicts; we can subtract those out and see if we still have
    1497                 :      * a conflict.  This is O(N) in the number of processes holding or
    1498                 :      * awaiting locks on this object.  We could improve that by making the
    1499                 :      * shared memory state more complex (and larger) but it doesn't seem worth
    1500 ECB             :      * it.
    1501                 :      */
    1502 GNC         631 :     dlist_foreach(proclock_iter, &lock->procLocks)
    1503 ECB             :     {
    1504 GNC         597 :         PROCLOCK   *otherproclock =
    1505             597 :             dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
    1506                 : 
    1507 CBC         597 :         if (proclock != otherproclock &&
    1508 GIC         563 :             proclock->groupLeader == otherproclock->groupLeader &&
    1509 CBC         435 :             (otherproclock->holdMask & conflictMask) != 0)
    1510                 :         {
    1511             433 :             int         intersectMask = otherproclock->holdMask & conflictMask;
    1512                 : 
    1513            3897 :             for (i = 1; i <= numLockModes; i++)
    1514                 :             {
    1515            3464 :                 if ((intersectMask & LOCKBIT_ON(i)) != 0)
    1516 EUB             :                 {
    1517 CBC         440 :                     if (conflictsRemaining[i] <= 0)
    1518 LBC           0 :                         elog(PANIC, "proclocks held do not match lock");
    1519 GIC         440 :                     conflictsRemaining[i]--;
    1520             440 :                     totalConflictsRemaining--;
    1521                 :                 }
    1522 ECB             :             }
    1523                 : 
    1524 GIC         433 :             if (totalConflictsRemaining == 0)
    1525                 :             {
    1526 ECB             :                 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
    1527                 :                                proclock);
    1528 GIC         432 :                 return false;
    1529                 :             }
    1530                 :         }
    1531                 :     }
    1532                 : 
    1533                 :     /* Nope, it's a real conflict. */
    1534                 :     PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
    1535              34 :     return true;
    1536                 : }
    1537                 : 
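
The arithmetic in LockCheckConflicts above can be summarized as: for each lock mode in the requested mode's conflict mask, count the granted holders, subtract one for any mode the requester itself holds, and report a conflict only if something remains. A self-contained C sketch of that calculation, without the group-leader pass (illustration only; array sizes and sample data are invented):

#include <stdbool.h>
#include <stdio.h>

#define MAX_MODES 8
#define LOCKBIT_ON(m) (1 << (m))

/*
 * Return true if a request whose conflict mask is conflict_mask still
 * conflicts after discounting the requester's own granted locks.
 */
static bool
conflicts(int conflict_mask, const int granted[], int my_hold_mask)
{
    int remaining = 0;

    for (int m = 1; m < MAX_MODES; m++)
    {
        if ((conflict_mask & LOCKBIT_ON(m)) == 0)
            continue;                   /* mode m cannot conflict with us */

        int n = granted[m];
        if (my_hold_mask & LOCKBIT_ON(m))
            n--;                        /* our own lock is not a conflict */
        remaining += n;
    }
    return remaining > 0;
}

int
main(void)
{
    int granted[MAX_MODES] = {0};

    granted[5] = 1;                     /* one holder of mode 5, which is us */

    /* Request a mode that conflicts with mode 5. */
    printf("%d\n", conflicts(LOCKBIT_ON(5), granted, LOCKBIT_ON(5))); /* 0 */
    printf("%d\n", conflicts(LOCKBIT_ON(5), granted, 0));             /* 1 */
    return 0;
}
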
    1538                 : /*
    1539                 :  * GrantLock -- update the lock and proclock data structures to show
    1540                 :  *      the lock request has been granted.
    1541                 :  *
    1542                 :  * NOTE: if proc was blocked, it also needs to be removed from the wait list
    1543                 :  * and have its waitLock/waitProcLock fields cleared.  That's not done here.
    1544                 :  *
    1545 ECB             :  * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
    1546                 :  * table entry; but since we may be awaking some other process, we can't do
    1547                 :  * that here; it's done by GrantLockLocal, instead.
    1548                 :  */
    1549                 : void
    1550 CBC     4041850 : GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
    1551 ECB             : {
    1552 CBC     4041850 :     lock->nGranted++;
    1553 GIC     4041850 :     lock->granted[lockmode]++;
    1554 CBC     4041850 :     lock->grantMask |= LOCKBIT_ON(lockmode);
    1555         4041850 :     if (lock->granted[lockmode] == lock->requested[lockmode])
    1556         4041713 :         lock->waitMask &= LOCKBIT_OFF(lockmode);
    1557 GIC     4041850 :     proclock->holdMask |= LOCKBIT_ON(lockmode);
    1558                 :     LOCK_PRINT("GrantLock", lock, lockmode);
    1559         4041850 :     Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    1560         4041850 :     Assert(lock->nGranted <= lock->nRequested);
    1561         4041850 : }
    1562                 : 
    1563                 : /*
    1564                 :  * UnGrantLock -- opposite of GrantLock.
    1565                 :  *
    1566                 :  * Updates the lock and proclock data structures to show that the lock
    1567                 :  * is no longer held nor requested by the current holder.
    1568 ECB             :  *
    1569                 :  * Returns true if there were any waiters waiting on the lock that
    1570                 :  * should now be woken up with ProcLockWakeup.
    1571                 :  */
    1572                 : static bool
    1573 CBC     4041779 : UnGrantLock(LOCK *lock, LOCKMODE lockmode,
    1574 ECB             :             PROCLOCK *proclock, LockMethod lockMethodTable)
    1575                 : {
    1576 GIC     4041779 :     bool        wakeupNeeded = false;
    1577                 : 
    1578         4041779 :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    1579         4041779 :     Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    1580 CBC     4041779 :     Assert(lock->nGranted <= lock->nRequested);
    1581 ECB             : 
    1582                 :     /*
    1583                 :      * fix the general lock stats
    1584                 :      */
    1585 CBC     4041779 :     lock->nRequested--;
    1586 GIC     4041779 :     lock->requested[lockmode]--;
    1587         4041779 :     lock->nGranted--;
    1588 CBC     4041779 :     lock->granted[lockmode]--;
    1589                 : 
    1590 GIC     4041779 :     if (lock->granted[lockmode] == 0)
    1591                 :     {
    1592                 :         /* change the conflict mask.  No more of this lock type. */
    1593         4024055 :         lock->grantMask &= LOCKBIT_OFF(lockmode);
    1594                 :     }
    1595                 : 
    1596                 :     LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
    1597                 : 
    1598                 :     /*
    1599                 :      * We need only run ProcLockWakeup if the released lock conflicts with at
    1600                 :      * least one of the lock types requested by waiter(s).  Otherwise whatever
    1601                 :      * conflict made them wait must still exist.  NOTE: before MVCC, we could
    1602 ECB             :      * skip wakeup if lock->granted[lockmode] was still positive. But that's
    1603                 :      * not true anymore, because the remaining granted locks might belong to
    1604                 :      * some waiter, who could now be awakened because he doesn't conflict with
    1605                 :      * his own locks.
    1606                 :      */
    1607 GIC     4041779 :     if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
    1608 CBC         951 :         wakeupNeeded = true;
    1609                 : 
    1610                 :     /*
    1611 ECB             :      * Now fix the per-proclock state.
    1612                 :      */
    1613 GIC     4041779 :     proclock->holdMask &= LOCKBIT_OFF(lockmode);
    1614                 :     PROCLOCK_PRINT("UnGrantLock: updated", proclock);
    1615                 : 
    1616         4041779 :     return wakeupNeeded;
    1617                 : }
    1618                 : 
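
GrantLock and UnGrantLock above keep four pieces of bookkeeping in step: the nGranted/nRequested totals, the per-mode granted[]/requested[] counts, and the grantMask/waitMask bit summaries. A toy C sketch of that symmetry (illustration only; the struct mirrors the fields used above but is not the real LOCK type):

#include <assert.h>

#define MAX_MODES 8
#define LOCKBIT_ON(m)  (1 << (m))
#define LOCKBIT_OFF(m) (~(1 << (m)))

typedef struct ToyLock
{
    int nRequested;
    int nGranted;
    int requested[MAX_MODES];
    int granted[MAX_MODES];
    int grantMask;
    int waitMask;
} ToyLock;

static void
grant(ToyLock *lock, int mode)
{
    lock->nGranted++;
    lock->granted[mode]++;
    lock->grantMask |= LOCKBIT_ON(mode);
    if (lock->granted[mode] == lock->requested[mode])
        lock->waitMask &= LOCKBIT_OFF(mode);    /* nobody left waiting for it */
}

static void
ungrant(ToyLock *lock, int mode)
{
    lock->nRequested--;
    lock->requested[mode]--;
    lock->nGranted--;
    lock->granted[mode]--;
    if (lock->granted[mode] == 0)
        lock->grantMask &= LOCKBIT_OFF(mode);   /* no holder of this mode left */
}

int
main(void)
{
    ToyLock lock = {0};

    lock.nRequested = 1;                /* the request is counted up front */
    lock.requested[3] = 1;
    grant(&lock, 3);
    assert(lock.grantMask == LOCKBIT_ON(3));

    ungrant(&lock, 3);
    assert(lock.grantMask == 0 && lock.nGranted == 0 && lock.nRequested == 0);
    return 0;
}
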
    1619                 : /*
    1620                 :  * CleanUpLock -- clean up after releasing a lock.  We garbage-collect the
    1621                 :  * proclock and lock objects if possible, and call ProcLockWakeup if there
    1622                 :  * are remaining requests and the caller says it's OK.  (Normally, this
    1623                 :  * should be called after UnGrantLock, and wakeupNeeded is the result from
    1624                 :  * UnGrantLock.)
    1625 ECB             :  *
    1626                 :  * The appropriate partition lock must be held at entry, and will be
    1627                 :  * held at exit.
    1628                 :  */
    1629                 : static void
    1630 GIC     4012126 : CleanUpLock(LOCK *lock, PROCLOCK *proclock,
    1631                 :             LockMethod lockMethodTable, uint32 hashcode,
    1632                 :             bool wakeupNeeded)
    1633 ECB             : {
    1634                 :     /*
    1635                 :      * If this was my last hold on this lock, delete my entry in the proclock
    1636                 :      * table.
    1637                 :      */
    1638 CBC     4012126 :     if (proclock->holdMask == 0)
    1639 ECB             :     {
    1640                 :         uint32      proclock_hashcode;
    1641                 : 
    1642                 :         PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
    1643 GNC     2699945 :         dlist_delete(&proclock->lockLink);
    1644         2699945 :         dlist_delete(&proclock->procLink);
    1645 GIC     2699945 :         proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
    1646 GBC     2699945 :         if (!hash_search_with_hash_value(LockMethodProcLockHash,
    1647 GNC     2699945 :                                          &(proclock->tag),
    1648                 :                                          proclock_hashcode,
    1649 ECB             :                                          HASH_REMOVE,
    1650                 :                                          NULL))
    1651 UIC           0 :             elog(PANIC, "proclock table corrupted");
    1652                 :     }
    1653                 : 
    1654 GIC     4012126 :     if (lock->nRequested == 0)
    1655                 :     {
    1656 ECB             :         /*
    1657                 :          * The caller just released the last lock, so garbage-collect the lock
    1658                 :          * object.
    1659                 :          */
    1660                 :         LOCK_PRINT("CleanUpLock: deleting", lock, 0);
    1661 GNC     2677476 :         Assert(dlist_is_empty(&lock->procLocks));
    1662 GBC     2677476 :         if (!hash_search_with_hash_value(LockMethodLockHash,
    1663 GNC     2677476 :                                          &(lock->tag),
    1664 ECB             :                                          hashcode,
    1665                 :                                          HASH_REMOVE,
    1666                 :                                          NULL))
    1667 LBC           0 :             elog(PANIC, "lock table corrupted");
    1668                 :     }
    1669 CBC     1334650 :     else if (wakeupNeeded)
    1670                 :     {
    1671                 :         /* There are waiters on this lock, so wake them up. */
    1672 GIC         995 :         ProcLockWakeup(lockMethodTable, lock);
    1673                 :     }
    1674         4012126 : }
    1675                 : 
    1676                 : /*
    1677                 :  * GrantLockLocal -- update the locallock data structures to show
    1678                 :  *      the lock request has been granted.
    1679 ECB             :  *
    1680                 :  * We expect that LockAcquire made sure there is room to add a new
    1681                 :  * ResourceOwner entry.
    1682                 :  */
    1683                 : static void
    1684 CBC    28609622 : GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
    1685                 : {
    1686        28609622 :     LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    1687                 :     int         i;
    1688 ECB             : 
    1689 GIC    28609622 :     Assert(locallock->numLockOwners < locallock->maxLockOwners);
    1690 ECB             :     /* Count the total */
    1691 GIC    28609622 :     locallock->nLocks++;
    1692 ECB             :     /* Count the per-owner lock */
    1693 CBC    29100035 :     for (i = 0; i < locallock->numLockOwners; i++)
    1694                 :     {
    1695 GIC     2617761 :         if (lockOwners[i].owner == owner)
    1696 ECB             :         {
    1697 CBC     2127348 :             lockOwners[i].nLocks++;
    1698         2127348 :             return;
    1699 ECB             :         }
    1700                 :     }
    1701 GIC    26482274 :     lockOwners[i].owner = owner;
    1702        26482274 :     lockOwners[i].nLocks = 1;
    1703 CBC    26482274 :     locallock->numLockOwners++;
    1704 GIC    26482274 :     if (owner != NULL)
    1705        26424487 :         ResourceOwnerRememberLock(owner, locallock);
    1706                 : 
    1707                 :     /* Indicate that the lock is acquired for certain types of locks. */
    1708        26482274 :     CheckAndSetLockHeld(locallock, true);
    1709                 : }
    1710                 : 
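
GrantLockLocal above counts each acquisition twice: once in the locallock's total nLocks and once against the owning ResourceOwner, using a small, linearly scanned (owner, count) array. A toy C sketch of that bookkeeping (illustration only; the real code also registers the lock with the resource owner and grows the array on demand, as seen earlier in LockAcquire):

#include <assert.h>

typedef struct ToyOwner
{
    int id;
} ToyOwner;

typedef struct ToyLocalLock
{
    long nLocks;                        /* total, across all owners */
    int  numLockOwners;
    struct
    {
        ToyOwner *owner;
        long      nLocks;
    }    lockOwners[8];                 /* the real array grows on demand */
} ToyLocalLock;

static void
grant_local(ToyLocalLock *locallock, ToyOwner *owner)
{
    locallock->nLocks++;
    for (int i = 0; i < locallock->numLockOwners; i++)
    {
        if (locallock->lockOwners[i].owner == owner)
        {
            locallock->lockOwners[i].nLocks++;  /* same owner: just count it */
            return;
        }
    }
    /* New owner for this local lock: append an entry. */
    locallock->lockOwners[locallock->numLockOwners].owner = owner;
    locallock->lockOwners[locallock->numLockOwners].nLocks = 1;
    locallock->numLockOwners++;
}

int
main(void)
{
    ToyLocalLock locallock = {0};
    ToyOwner     xact_owner = {1};

    grant_local(&locallock, &xact_owner);
    grant_local(&locallock, &xact_owner);
    assert(locallock.nLocks == 2 && locallock.numLockOwners == 1);
    return 0;
}
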
    1711 ECB             : /*
    1712                 :  * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
    1713                 :  * and arrange for error cleanup if it fails
    1714                 :  */
    1715                 : static void
    1716 GIC      271615 : BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
    1717                 : {
    1718          271615 :     Assert(StrongLockInProgress == NULL);
    1719          271615 :     Assert(locallock->holdsStrongLockCount == false);
    1720                 : 
    1721                 :     /*
    1722                 :      * Adding to a memory location is not atomic, so we take a spinlock to
    1723                 :      * ensure we don't collide with someone else trying to bump the count at
    1724                 :      * the same time.
    1725 ECB             :      *
    1726                 :      * XXX: It might be worth considering using an atomic fetch-and-add
    1727                 :      * instruction here, on architectures where that is supported.
    1728                 :      */
    1729                 : 
    1730 CBC      271615 :     SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1731 GIC      271615 :     FastPathStrongRelationLocks->count[fasthashcode]++;
    1732          271615 :     locallock->holdsStrongLockCount = true;
    1733          271615 :     StrongLockInProgress = locallock;
    1734          271615 :     SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1735          271615 : }
    1736                 : 
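
The XXX note in BeginStrongLockAcquire above suggests that the spinlock-protected increment could instead be an atomic fetch-and-add where the hardware supports it. A minimal sketch of that alternative using C11 atomics rather than PostgreSQL's own atomics layer (illustration only; the array size and function names are invented):

#include <stdatomic.h>
#include <stdint.h>

#define STRONG_PARTITIONS 1024

static _Atomic uint32_t strong_count[STRONG_PARTITIONS];

static void
begin_strong(uint32_t fasthashcode)
{
    /* One atomic read-modify-write replaces acquire / increment / release. */
    atomic_fetch_add_explicit(&strong_count[fasthashcode], 1,
                              memory_order_seq_cst);
}

static void
abort_strong(uint32_t fasthashcode)
{
    atomic_fetch_sub_explicit(&strong_count[fasthashcode], 1,
                              memory_order_seq_cst);
}

int
main(void)
{
    begin_strong(7);
    abort_strong(7);
    return 0;
}
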
    1737 ECB             : /*
    1738                 :  * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
    1739                 :  * acquisition once it's no longer needed
    1740                 :  */
    1741                 : static void
    1742 GIC     4040178 : FinishStrongLockAcquire(void)
    1743                 : {
    1744         4040178 :     StrongLockInProgress = NULL;
    1745         4040178 : }
    1746                 : 
    1747 ECB             : /*
    1748                 :  * AbortStrongLockAcquire - undo strong lock state changes performed by
    1749                 :  * BeginStrongLockAcquire.
    1750                 :  */
    1751                 : void
    1752 CBC      511890 : AbortStrongLockAcquire(void)
    1753 ECB             : {
    1754                 :     uint32      fasthashcode;
    1755 CBC      511890 :     LOCALLOCK  *locallock = StrongLockInProgress;
    1756 ECB             : 
    1757 CBC      511890 :     if (locallock == NULL)
    1758          511678 :         return;
    1759 ECB             : 
    1760 CBC         212 :     fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    1761             212 :     Assert(locallock->holdsStrongLockCount == true);
    1762             212 :     SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    1763 GIC         212 :     Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    1764             212 :     FastPathStrongRelationLocks->count[fasthashcode]--;
    1765             212 :     locallock->holdsStrongLockCount = false;
    1766             212 :     StrongLockInProgress = NULL;
    1767             212 :     SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    1768                 : }
    1769                 : 
    1770                 : /*
    1771                 :  * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
    1772                 :  *      WaitOnLock on.
    1773                 :  *
    1774                 :  * proc.c needs this for the case where we are booted off the lock by
    1775                 :  * timeout, but discover that someone granted us the lock anyway.
    1776 ECB             :  *
    1777                 :  * We could just export GrantLockLocal, but that would require including
    1778                 :  * resowner.h in lock.h, which creates circularity.
    1779                 :  */
    1780                 : void
    1781 GIC         985 : GrantAwaitedLock(void)
    1782                 : {
    1783             985 :     GrantLockLocal(awaitedLock, awaitedOwner);
    1784             985 : }
    1785                 : 
    1786                 : /*
    1787                 :  * MarkLockClear -- mark an acquired lock as "clear"
    1788                 :  *
    1789 ECB             :  * This means that we know we have absorbed all sinval messages that other
    1790                 :  * sessions generated before we acquired this lock, and so we can confidently
    1791                 :  * assume we know about any catalog changes protected by this lock.
    1792                 :  */
    1793                 : void
    1794 GIC    25331040 : MarkLockClear(LOCALLOCK *locallock)
    1795                 : {
    1796        25331040 :     Assert(locallock->nLocks > 0);
    1797        25331040 :     locallock->lockCleared = true;
    1798        25331040 : }
    1799                 : 
    1800                 : /*
    1801                 :  * WaitOnLock -- wait to acquire a lock
    1802                 :  *
    1803                 :  * Caller must have set MyProc->heldLocks to reflect locks already held
    1804 ECB             :  * on the lockable object by this process.
    1805                 :  *
    1806                 :  * The appropriate partition lock must be held at entry.
    1807                 :  */
    1808                 : static void
    1809 GIC        1030 : WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
    1810                 : {
    1811            1030 :     LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
    1812            1030 :     LockMethod  lockMethodTable = LockMethods[lockmethodid];
    1813                 : 
    1814 ECB             :     LOCK_PRINT("WaitOnLock: sleeping on lock",
    1815                 :                locallock->lock, locallock->tag.mode);
    1816                 : 
    1817                 :     /* adjust the process title to indicate that it's waiting */
    1818 GNC        1030 :     set_ps_display_suffix("waiting");
    1819                 : 
    1820 GIC        1030 :     awaitedLock = locallock;
    1821            1030 :     awaitedOwner = owner;
    1822                 : 
    1823 ECB             :     /*
    1824                 :      * NOTE: Think not to put any shared-state cleanup after the call to
    1825                 :      * ProcSleep, in either the normal or failure path.  The lock state must
    1826                 :      * be fully set by the lock grantor, or by CheckDeadLock if we give up
    1827                 :      * waiting for the lock.  This is necessary because of the possibility
    1828                 :      * that a cancel/die interrupt will interrupt ProcSleep after someone else
    1829                 :      * grants us the lock, but before we've noticed it. Hence, after granting,
    1830                 :      * the locktable state must fully reflect the fact that we own the lock;
    1831                 :      * we can't do additional work on return.
    1832                 :      *
    1833                 :      * We can and do use a PG_TRY block to try to clean up after failure, but
    1834                 :      * this still has a major limitation: elog(FATAL) can occur while waiting
    1835                 :      * (eg, a "die" interrupt), and then control won't come back here. So all
    1836                 :      * cleanup of essential state should happen in LockErrorCleanup, not here.
    1837                 :      * We can use PG_TRY to clear the "waiting" status flags, since doing that
    1838                 :      * is unimportant if the process exits.
    1839                 :      */
    1840 CBC        1030 :     PG_TRY();
    1841                 :     {
    1842 GIC        1030 :         if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
    1843                 :         {
    1844 ECB             :             /*
    1845                 :              * We failed as a result of a deadlock, see CheckDeadLock(). Quit
    1846                 :              * now.
    1847                 :              */
    1848 GIC           5 :             awaitedLock = NULL;
    1849 ECB             :             LOCK_PRINT("WaitOnLock: aborting on lock",
    1850                 :                        locallock->lock, locallock->tag.mode);
    1851 GIC           5 :             LWLockRelease(LockHashPartitionLock(locallock->hashcode));
    1852 ECB             : 
    1853                 :             /*
    1854                 :              * Now that we aren't holding the partition lock, we can give an
    1855                 :              * error report including details about the detected deadlock.
    1856                 :              */
    1857 GIC           5 :             DeadLockReport();
    1858                 :             /* not reached */
    1859 ECB             :         }
    1860                 :     }
    1861 GIC          42 :     PG_CATCH();
    1862                 :     {
    1863 ECB             :         /* In this path, awaitedLock remains set until LockErrorCleanup */
    1864                 : 
    1865                 :         /* reset ps display to remove the suffix */
    1866 GNC          42 :         set_ps_display_remove_suffix();
    1867                 : 
    1868                 :         /* and propagate the error */
    1869 GIC          42 :         PG_RE_THROW();
    1870                 :     }
    1871             983 :     PG_END_TRY();
    1872 ECB             : 
    1873 GIC         983 :     awaitedLock = NULL;
    1874 ECB             : 
    1875                 :     /* reset ps display to remove the suffix */
    1876 GNC         983 :     set_ps_display_remove_suffix();
    1877 ECB             : 
    1878                 :     LOCK_PRINT("WaitOnLock: wakeup on lock",
    1879                 :                locallock->lock, locallock->tag.mode);
    1880 CBC         983 : }
    1881                 : 
    1882                 : /*
    1883 ECB             :  * Remove a proc from the wait-queue it is on (caller must know it is on one).
    1884                 :  * This is only used when the proc has failed to get the lock, so we set its
    1885                 :  * waitStatus to PROC_WAIT_STATUS_ERROR.
    1886                 :  *
    1887                 :  * Appropriate partition lock must be held by caller.  Also, caller is
    1888                 :  * responsible for signaling the proc if needed.
    1889                 :  *
    1890                 :  * NB: this does not clean up any locallock object that may exist for the lock.
    1891                 :  */
    1892                 : void
    1893 CBC          45 : RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
    1894                 : {
    1895 GIC          45 :     LOCK       *waitLock = proc->waitLock;
    1896 CBC          45 :     PROCLOCK   *proclock = proc->waitProcLock;
    1897              45 :     LOCKMODE    lockmode = proc->waitLockMode;
    1898              45 :     LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
    1899                 : 
    1900                 :     /* Make sure proc is waiting */
    1901 GIC          45 :     Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
    1902              45 :     Assert(proc->links.next != NULL);
    1903              45 :     Assert(waitLock);
    1904 GNC          45 :     Assert(!dclist_is_empty(&waitLock->waitProcs));
    1905 GIC          45 :     Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
    1906                 : 
    1907 ECB             :     /* Remove proc from lock's wait queue */
    1908 GNC          45 :     dclist_delete_from(&waitLock->waitProcs, &proc->links);
    1909 ECB             : 
    1910                 :     /* Undo increments of request counts by waiting process */
    1911 GIC          45 :     Assert(waitLock->nRequested > 0);
    1912              45 :     Assert(waitLock->nRequested > proc->waitLock->nGranted);
    1913              45 :     waitLock->nRequested--;
    1914              45 :     Assert(waitLock->requested[lockmode] > 0);
    1915              45 :     waitLock->requested[lockmode]--;
    1916                 :     /* don't forget to clear waitMask bit if appropriate */
    1917              45 :     if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
    1918              44 :         waitLock->waitMask &= LOCKBIT_OFF(lockmode);
    1919                 : 
    1920                 :     /* Clean up the proc's own state, and pass it the ok/fail signal */
    1921              45 :     proc->waitLock = NULL;
    1922              45 :     proc->waitProcLock = NULL;
    1923 CBC          45 :     proc->waitStatus = PROC_WAIT_STATUS_ERROR;
    1924                 : 
    1925 ECB             :     /*
    1926                 :      * Delete the proclock immediately if it represents no already-held locks.
    1927                 :      * (This must happen now because if the owner of the lock decides to
    1928                 :      * release it, and the requested/granted counts then go to zero,
    1929                 :      * LockRelease expects there to be no remaining proclocks.) Then see if
    1930                 :      * any other waiters for the lock can be woken up now.
    1931                 :      */
    1932 GIC          45 :     CleanUpLock(waitLock, proclock,
    1933              45 :                 LockMethods[lockmethodid], hashcode,
    1934 ECB             :                 true);
    1935 GBC          45 : }
    1936 ECB             : 
    1937                 : /*
    1938 EUB             :  * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
    1939                 :  *      Release a session lock if 'sessionLock' is true, else release a
    1940                 :  *      regular transaction lock.
    1941                 :  *
    1942                 :  * Side Effects: find any waiting processes that are now wakable,
    1943                 :  *      grant them their requested locks and awaken them.
    1944                 :  *      (We have to grant the lock here to avoid a race between
    1945                 :  *      the waking process and any new process to
    1946                 :  *      come along and request the lock.)
    1947                 :  */
    1948                 : bool
    1949 GIC    25504916 : LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
    1950 ECB             : {
    1951 CBC    25504916 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    1952 ECB             :     LockMethod  lockMethodTable;
    1953                 :     LOCALLOCKTAG localtag;
    1954                 :     LOCALLOCK  *locallock;
    1955                 :     LOCK       *lock;
    1956                 :     PROCLOCK   *proclock;
    1957                 :     LWLock     *partitionLock;
    1958                 :     bool        wakeupNeeded;
    1959                 : 
    1960 GIC    25504916 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    1961 LBC           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    1962 GIC    25504916 :     lockMethodTable = LockMethods[lockmethodid];
    1963 CBC    25504916 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
    1964 UIC           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
    1965 ECB             : 
    1966                 : #ifdef LOCK_DEBUG
    1967                 :     if (LOCK_DEBUG_ENABLED(locktag))
    1968                 :         elog(LOG, "LockRelease: lock [%u,%u] %s",
    1969                 :              locktag->locktag_field1, locktag->locktag_field2,
    1970                 :              lockMethodTable->lockModeNames[lockmode]);
    1971                 : #endif
    1972                 : 
    1973                 :     /*
    1974                 :      * Find the LOCALLOCK entry for this lock and lockmode
    1975                 :      */
    1976 GIC    25504916 :     MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
    1977 CBC    25504916 :     localtag.lock = *locktag;
    1978        25504916 :     localtag.mode = lockmode;
    1979                 : 
    1980        25504916 :     locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
    1981                 :                                           &localtag,
    1982 ECB             :                                           HASH_FIND, NULL);
    1983                 : 
    1984                 :     /*
    1985                 :      * We don't hold this lock; emit a WARNING and let the caller print its own error message, too.  Do not ereport(ERROR).
    1986                 :      */
    1987 CBC    25504916 :     if (!locallock || locallock->nLocks <= 0)
    1988                 :     {
    1989              13 :         elog(WARNING, "you don't own a lock of type %s",
    1990 ECB             :              lockMethodTable->lockModeNames[lockmode]);
    1991 GIC          13 :         return false;
    1992 ECB             :     }
    1993                 : 
    1994                 :     /*
    1995                 :      * Decrease the count for the resource owner.
    1996                 :      */
    1997                 :     {
    1998 GIC    25504903 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    1999 ECB             :         ResourceOwner owner;
    2000                 :         int         i;
    2001                 : 
    2002                 :         /* Identify owner for lock */
    2003 GIC    25504903 :         if (sessionLock)
    2004 CBC       57777 :             owner = NULL;
    2005                 :         else
    2006 GIC    25447126 :             owner = CurrentResourceOwner;
    2007                 : 
    2008        25505854 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2009                 :         {
    2010        25505842 :             if (lockOwners[i].owner == owner)
    2011                 :             {
    2012 CBC    25504891 :                 Assert(lockOwners[i].nLocks > 0);
    2013 GIC    25504891 :                 if (--lockOwners[i].nLocks == 0)
    2014 ECB             :                 {
    2015 CBC    24898131 :                     if (owner != NULL)
    2016 GIC    24840378 :                         ResourceOwnerForgetLock(owner, locallock);
    2017                 :                     /* compact out unused slot */
    2018        24898131 :                     locallock->numLockOwners--;
    2019        24898131 :                     if (i < locallock->numLockOwners)
    2020              52 :                         lockOwners[i] = lockOwners[locallock->numLockOwners];
    2021                 :                 }
    2022        25504891 :                 break;
    2023                 :             }
    2024 ECB             :         }
    2025 GIC    25504903 :         if (i < 0)
    2026                 :         {
    2027 ECB             :             /* don't release a lock belonging to another owner */
    2028 CBC          12 :             elog(WARNING, "you don't own a lock of type %s",
    2029                 :                  lockMethodTable->lockModeNames[lockmode]);
    2030 GIC          12 :             return false;
    2031                 :         }
    2032                 :     }
    2033                 : 
    2034                 :     /*
    2035                 :      * Decrease the total local count.  If we're still holding the lock, we're
    2036 ECB             :      * done.
    2037                 :      */
    2038 GIC    25504891 :     locallock->nLocks--;
    2039 ECB             : 
    2040 CBC    25504891 :     if (locallock->nLocks > 0)
    2041 GIC      824107 :         return true;
    2042 ECB             : 
    2043                 :     /*
    2044                 :      * At this point we can no longer suppose we are clear of invalidation
    2045                 :      * messages related to this lock.  Although we'll delete the LOCALLOCK
    2046                 :      * object before any intentional return from this routine, it seems worth
    2047                 :      * the trouble to explicitly reset lockCleared right now, just in case
    2048                 :      * some error prevents us from deleting the LOCALLOCK.
    2049                 :      */
    2050 CBC    24680784 :     locallock->lockCleared = false;
    2051                 : 
    2052 ECB             :     /* Attempt fast release of any lock eligible for the fast path. */
    2053 GIC    24680784 :     if (EligibleForRelationFastPath(locktag, lockmode) &&
    2054        23038994 :         FastPathLocalUseCount > 0)
    2055                 :     {
    2056                 :         bool        released;
    2057                 : 
    2058                 :         /*
    2059                 :          * We might not find the lock in our fast-path array, even if we originally
    2060                 :          * entered it there; another backend may have moved it to the main lock table.
    2061                 :          */
    2062 CBC    22238109 :         LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2063        22238109 :         released = FastPathUnGrantRelationLock(locktag->locktag_field2,
    2064                 :                                                lockmode);
    2065 GIC    22238109 :         LWLockRelease(&MyProc->fpInfoLock);
    2066        22238109 :         if (released)
    2067 ECB             :         {
    2068 CBC    21494485 :             RemoveLocalLock(locallock);
    2069 GIC    21494485 :             return true;
    2070                 :         }
    2071                 :     }
    2072                 : 
    2073 ECB             :     /*
    2074 EUB             :      * Otherwise we've got to mess with the shared lock table.
    2075 ECB             :      */
    2076 GIC     3186299 :     partitionLock = LockHashPartitionLock(locallock->hashcode);
    2077 ECB             : 
    2078 CBC     3186299 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2079 ECB             : 
    2080                 :     /*
    2081                 :      * Normally, we don't need to re-find the lock or proclock, since we kept
    2082                 :      * their addresses in the locallock table, and they couldn't have been
    2083                 :      * removed while we were holding a lock on them.  But it's possible that
    2084 EUB             :      * the lock was taken fast-path and has since been moved to the main hash
    2085                 :      * table by another backend, in which case we will need to look up the
    2086                 :      * objects here.  We assume the lock field is NULL if so.
    2087 ECB             :      */
    2088 GIC     3186299 :     lock = locallock->lock;
    2089         3186299 :     if (!lock)
    2090                 :     {
    2091                 :         PROCLOCKTAG proclocktag;
    2092                 : 
    2093               3 :         Assert(EligibleForRelationFastPath(locktag, lockmode));
    2094 CBC           3 :         lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    2095                 :                                                     locktag,
    2096                 :                                                     locallock->hashcode,
    2097 EUB             :                                                     HASH_FIND,
    2098                 :                                                     NULL);
    2099 GIC           3 :         if (!lock)
    2100 UBC           0 :             elog(ERROR, "failed to re-find shared lock object");
    2101 GBC           3 :         locallock->lock = lock;
    2102                 : 
    2103 GIC           3 :         proclocktag.myLock = lock;
    2104               3 :         proclocktag.myProc = MyProc;
    2105               3 :         locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
    2106                 :                                                        &proclocktag,
    2107 ECB             :                                                        HASH_FIND,
    2108                 :                                                        NULL);
    2109 CBC           3 :         if (!locallock->proclock)
    2110 UIC           0 :             elog(ERROR, "failed to re-find shared proclock object");
    2111                 :     }
    2112                 :     LOCK_PRINT("LockRelease: found", lock, lockmode);
    2113 CBC     3186299 :     proclock = locallock->proclock;
    2114                 :     PROCLOCK_PRINT("LockRelease: found", proclock);
    2115 ECB             : 
    2116                 :     /*
    2117                 :      * Double-check that we are actually holding a lock of the type we want to
    2118                 :      * release.
    2119                 :      */
    2120 GIC     3186299 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    2121                 :     {
    2122                 :         PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
    2123 UIC           0 :         LWLockRelease(partitionLock);
    2124               0 :         elog(WARNING, "you don't own a lock of type %s",
    2125                 :              lockMethodTable->lockModeNames[lockmode]);
    2126               0 :         RemoveLocalLock(locallock);
    2127               0 :         return false;
    2128 ECB             :     }
    2129                 : 
    2130                 :     /*
    2131                 :      * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
    2132                 :      */
    2133 GIC     3186299 :     wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
    2134                 : 
    2135         3186299 :     CleanUpLock(lock, proclock,
    2136                 :                 lockMethodTable, locallock->hashcode,
    2137 ECB             :                 wakeupNeeded);
    2138                 : 
    2139 CBC     3186299 :     LWLockRelease(partitionLock);
    2140 EUB             : 
    2141 CBC     3186299 :     RemoveLocalLock(locallock);
    2142 GIC     3186299 :     return true;
    2143                 : }
    2144                 : 
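Callers normally reach LockRelease through a thin wrapper in lmgr.c that builds the LOCKTAG and releases one lock of the requested mode. A minimal sketch of that wrapper follows; the name is hypothetical, while SET_LOCKTAG_RELATION and MyDatabaseId are the real pieces it assumes.

    /* Minimal caller-side sketch of releasing a relation lock; not part of lock.c. */
    void
    UnlockRelationOidSketch(Oid relid, LOCKMODE lockmode)
    {
        LOCKTAG     tag;

        SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);

        /* sessionLock = false: release a regular transaction-level lock */
        LockRelease(&tag, lockmode, false);
    }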
    2145                 : /*
    2146                 :  * LockReleaseAll -- Release all locks of the specified lock method that
    2147                 :  *      are held by the current process.
    2148                 :  *
    2149                 :  * Well, not necessarily *all* locks.  The available behaviors are:
    2150                 :  *      allLocks == true: release all locks including session locks.
    2151                 :  *      allLocks == false: release all non-session locks.
    2152                 :  */
    2153                 : void
    2154 CBC      984161 : LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
    2155 ECB             : {
    2156                 :     HASH_SEQ_STATUS status;
    2157                 :     LockMethod  lockMethodTable;
    2158                 :     int         i,
    2159                 :                 numLockModes;
    2160                 :     LOCALLOCK  *locallock;
    2161                 :     LOCK       *lock;
    2162                 :     int         partition;
    2163 GIC      984161 :     bool        have_fast_path_lwlock = false;
    2164                 : 
    2165          984161 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2166 LBC           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2167 GIC      984161 :     lockMethodTable = LockMethods[lockmethodid];
    2168 ECB             : 
    2169                 : #ifdef LOCK_DEBUG
    2170                 :     if (*(lockMethodTable->trace_flag))
    2171                 :         elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
    2172                 : #endif
    2173                 : 
    2174                 :     /*
    2175                 :      * Get rid of our fast-path VXID lock, if appropriate.  Note that this is
    2176                 :      * the only way that the lock we hold on our own VXID can ever get
    2177                 :      * released: it is always and only released when a toplevel transaction
    2178                 :      * ends.
    2179                 :      */
    2180 GIC      984161 :     if (lockmethodid == DEFAULT_LOCKMETHOD)
    2181          486486 :         VirtualXactLockTableCleanup();
    2182 ECB             : 
    2183 CBC      984161 :     numLockModes = lockMethodTable->numLockModes;
    2184                 : 
    2185                 :     /*
    2186                 :      * First we run through the locallock table and get rid of unwanted
    2187                 :      * entries, then we scan the process's proclocks and get rid of those. We
    2188                 :      * do this separately because we may have multiple locallock entries
    2189                 :      * pointing to the same proclock, and we daren't end up with any dangling
    2190 ECB             :      * pointers.  Fast-path locks are cleaned up during the locallock table
    2191                 :      * scan, though.
    2192                 :      */
    2193 GIC      984161 :     hash_seq_init(&status, LockMethodLocalHash);
    2194                 : 
    2195 CBC     2557813 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2196                 :     {
    2197 ECB             :         /*
    2198                 :          * If the LOCALLOCK entry is unused, we must've run out of shared
    2199                 :          * memory while trying to set up this lock.  Just forget the local
    2200                 :          * entry.
    2201                 :          */
    2202 GIC     1573652 :         if (locallock->nLocks == 0)
    2203 ECB             :         {
    2204 CBC          45 :             RemoveLocalLock(locallock);
    2205              45 :             continue;
    2206                 :         }
    2207                 : 
    2208 ECB             :         /* Ignore items that are not of the lockmethod to be removed */
    2209 CBC     1573607 :         if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
    2210 GIC       59108 :             continue;
    2211 ECB             : 
    2212                 :         /*
    2213                 :          * If we are asked to release all locks, we can just zap the entry.
    2214                 :          * Otherwise, must scan to see if there are session locks. We assume
    2215                 :          * there is at most one lockOwners entry for session locks.
    2216                 :          */
    2217 GIC     1514499 :         if (!allLocks)
    2218                 :         {
    2219         1456898 :             LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    2220                 : 
    2221 ECB             :             /* If session lock is above array position 0, move it down to 0 */
    2222 CBC     2954501 :             for (i = 0; i < locallock->numLockOwners; i++)
    2223 ECB             :             {
    2224 GIC     1497603 :                 if (lockOwners[i].owner == NULL)
    2225           59025 :                     lockOwners[0] = lockOwners[i];
    2226                 :                 else
    2227 CBC     1438578 :                     ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
    2228 EUB             :             }
    2229                 : 
    2230 GIC     1456898 :             if (locallock->numLockOwners > 0 &&
    2231         1456898 :                 lockOwners[0].owner == NULL &&
    2232           59025 :                 lockOwners[0].nLocks > 0)
    2233                 :             {
    2234                 :                 /* Fix the locallock to show just the session locks */
    2235           59025 :                 locallock->nLocks = lockOwners[0].nLocks;
    2236           59025 :                 locallock->numLockOwners = 1;
    2237                 :                 /* We aren't deleting this locallock, so done */
    2238 CBC       59025 :                 continue;
    2239                 :             }
    2240 ECB             :             else
    2241 CBC     1397873 :                 locallock->numLockOwners = 0;
    2242                 :         }
    2243                 : 
    2244                 :         /*
    2245 ECB             :          * If the lock or proclock pointers are NULL, this lock was taken via
    2246                 :          * the relation fast-path (and is not known to have been transferred).
    2247                 :          */
    2248 CBC     1455474 :         if (locallock->proclock == NULL || locallock->lock == NULL)
    2249             955 :         {
    2250 GIC      602057 :             LOCKMODE    lockmode = locallock->tag.mode;
    2251                 :             Oid         relid;
    2252                 : 
    2253                 :             /* Verify that a fast-path lock is what we've got. */
    2254          602057 :             if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
    2255 UIC           0 :                 elog(PANIC, "locallock table corrupted");
    2256                 : 
    2257 ECB             :             /*
    2258                 :              * If we don't currently hold the LWLock that protects our
    2259                 :              * fast-path data structures, we must acquire it before attempting
    2260                 :              * to release the lock via the fast-path.  We will continue to
    2261                 :              * hold the LWLock until we're done scanning the locallock table,
    2262                 :              * unless we hit a transferred fast-path lock.  (XXX is this
    2263                 :              * really such a good idea?  There could be a lot of entries ...)
    2264                 :              */
    2265 GIC      602057 :             if (!have_fast_path_lwlock)
    2266                 :             {
    2267 CBC      219537 :                 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2268 GIC      219537 :                 have_fast_path_lwlock = true;
    2269 ECB             :             }
    2270                 : 
    2271                 :             /* Attempt fast-path release. */
    2272 GIC      602057 :             relid = locallock->tag.lock.locktag_field2;
    2273          602057 :             if (FastPathUnGrantRelationLock(relid, lockmode))
    2274 ECB             :             {
    2275 CBC      601102 :                 RemoveLocalLock(locallock);
    2276 GIC      601102 :                 continue;
    2277                 :             }
    2278 ECB             : 
    2279                 :             /*
    2280                 :              * Our lock, originally taken via the fast path, has been
    2281                 :              * transferred to the main lock table.  That's going to require
    2282                 :              * some extra work, so release our fast-path lock before starting.
    2283                 :              */
    2284 GIC         955 :             LWLockRelease(&MyProc->fpInfoLock);
    2285             955 :             have_fast_path_lwlock = false;
    2286                 : 
    2287                 :             /*
    2288 ECB             :              * Now dump the lock.  We haven't got a pointer to the LOCK or
    2289                 :              * PROCLOCK in this case, so we have to handle this a bit
    2290                 :              * differently than a normal lock release.  Unfortunately, this
    2291                 :              * requires an extra LWLock acquire-and-release cycle on the
    2292                 :              * partitionLock, but hopefully it shouldn't happen often.
    2293                 :              */
    2294 CBC         955 :             LockRefindAndRelease(lockMethodTable, MyProc,
    2295                 :                                  &locallock->tag.lock, lockmode, false);
    2296 GIC         955 :             RemoveLocalLock(locallock);
    2297             955 :             continue;
    2298                 :         }
    2299                 : 
    2300                 :         /* Mark the proclock to show we need to release this lockmode */
    2301          853417 :         if (locallock->nLocks > 0)
    2302          853417 :             locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
    2303                 : 
    2304                 :         /* And remove the locallock hashtable entry */
    2305          853417 :         RemoveLocalLock(locallock);
    2306                 :     }
    2307                 : 
    2308                 :     /* Done with the fast-path data structures */
    2309          984161 :     if (have_fast_path_lwlock)
    2310          218582 :         LWLockRelease(&MyProc->fpInfoLock);
    2311                 : 
    2312                 :     /*
    2313                 :      * Now, scan each lock partition separately.
    2314                 :      */
    2315 CBC    16730737 :     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
    2316 ECB             :     {
    2317                 :         LWLock     *partitionLock;
    2318 GNC    15746576 :         dlist_head *procLocks = &MyProc->myProcLocks[partition];
    2319                 :         dlist_mutable_iter proclock_iter;
    2320 ECB             : 
    2321 GIC    15746576 :         partitionLock = LockHashPartitionLockByIndex(partition);
    2322 ECB             : 
    2323                 :         /*
    2324                 :          * If the proclock list for this partition is empty, we can skip
    2325                 :          * acquiring the partition lock.  This optimization is trickier than
    2326                 :          * it looks, because another backend could be in process of adding
    2327                 :          * something to our proclock list due to promoting one of our
    2328                 :          * fast-path locks.  However, any such lock must be one that we
    2329                 :          * decided not to delete above, so it's okay to skip it again now;
    2330                 :          * we'd just decide not to delete it again.  We must, however, be
    2331                 :          * careful to re-fetch the list header once we've acquired the
    2332                 :          * partition lock, to be sure we have a valid, up-to-date pointer.
    2333                 :          * (There is probably no significant risk if pointer fetch/store is
    2334                 :          * atomic, but we don't wish to assume that.)
    2335                 :          *
    2336                 :          * XXX This argument assumes that the locallock table correctly
    2337                 :          * represents all of our fast-path locks.  While allLocks mode
    2338                 :          * guarantees to clean up all of our normal locks regardless of the
    2339                 :          * locallock situation, we lose that guarantee for fast-path locks.
    2340                 :          * This is not ideal.
    2341                 :          */
    2342 GNC    15746576 :         if (dlist_is_empty(procLocks))
    2343 GIC    14970807 :             continue;           /* needn't examine this partition */
    2344                 : 
    2345 CBC      775769 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2346 ECB             : 
    2347 GNC     1717023 :         dlist_foreach_modify(proclock_iter, procLocks)
    2348 ECB             :         {
    2349 GNC      941254 :             PROCLOCK   *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
    2350 CBC      941254 :             bool        wakeupNeeded = false;
    2351 ECB             : 
    2352 GIC      941254 :             Assert(proclock->tag.myProc == MyProc);
    2353 ECB             : 
    2354 CBC      941254 :             lock = proclock->tag.myLock;
    2355                 : 
    2356                 :             /* Ignore items that are not of the lockmethod to be removed */
    2357          941254 :             if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
    2358           59105 :                 continue;
    2359                 : 
    2360                 :             /*
    2361 ECB             :              * In allLocks mode, force release of all locks even if locallock
    2362                 :              * table had problems
    2363                 :              */
    2364 CBC      882149 :             if (allLocks)
    2365 GIC       36933 :                 proclock->releaseMask = proclock->holdMask;
    2366 ECB             :             else
    2367 GIC      845216 :                 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
    2368                 : 
    2369                 :             /*
    2370 ECB             :              * Ignore items that have nothing to be released, unless they have
    2371                 :              * holdMask == 0 and are therefore recyclable
    2372                 :              */
    2373 GIC      882149 :             if (proclock->releaseMask == 0 && proclock->holdMask != 0)
    2374           58430 :                 continue;
    2375                 : 
    2376                 :             PROCLOCK_PRINT("LockReleaseAll", proclock);
    2377 ECB             :             LOCK_PRINT("LockReleaseAll", lock, 0);
    2378 GIC      823719 :             Assert(lock->nRequested >= 0);
    2379          823719 :             Assert(lock->nGranted >= 0);
    2380          823719 :             Assert(lock->nGranted <= lock->nRequested);
    2381          823719 :             Assert((proclock->holdMask & ~lock->grantMask) == 0);
    2382                 : 
    2383                 :             /*
    2384 ECB             :              * Release the previously-marked lock modes
    2385                 :              */
    2386 GIC     7413471 :             for (i = 1; i <= numLockModes; i++)
    2387                 :             {
    2388         6589752 :                 if (proclock->releaseMask & LOCKBIT_ON(i))
    2389 CBC      853417 :                     wakeupNeeded |= UnGrantLock(lock, i, proclock,
    2390 EUB             :                                                 lockMethodTable);
    2391                 :             }
    2392 CBC      823719 :             Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
    2393 GIC      823719 :             Assert(lock->nGranted <= lock->nRequested);
    2394 ECB             :             LOCK_PRINT("LockReleaseAll: updated", lock, 0);
    2395                 : 
    2396 GIC      823719 :             proclock->releaseMask = 0;
    2397 ECB             : 
    2398                 :             /* CleanUpLock will wake up waiters if needed. */
    2399 GIC      823719 :             CleanUpLock(lock, proclock,
    2400 ECB             :                         lockMethodTable,
    2401 GIC      823719 :                         LockTagHashCode(&lock->tag),
    2402 ECB             :                         wakeupNeeded);
    2403                 :         }                       /* loop over PROCLOCKs within this partition */
    2404                 : 
    2405 GIC      775769 :         LWLockRelease(partitionLock);
    2406                 :     }                           /* loop over partitions */
    2407                 : 
    2408                 : #ifdef LOCK_DEBUG
    2409                 :     if (*(lockMethodTable->trace_flag))
    2410                 :         elog(LOG, "LockReleaseAll done");
    2411                 : #endif
    2412          984161 : }
    2413                 : 
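At transaction end, LockReleaseAll is driven from proc.c: first make sure this backend is off any lock wait queue, then release the standard locks (session locks too, but only when aborting), and finally release transaction-level advisory locks. The sketch below is an approximation of that driver, modeled on proc.c's ProcReleaseLocks; treat the exact details as assumptions rather than a quotation.

    /* Approximate transaction-end driver, modeled on proc.c's ProcReleaseLocks. */
    static void
    ReleaseLocksAtXactEndSketch(bool isCommit)
    {
        if (!MyProc)
            return;

        /* If an error left us on a lock wait queue, get off it first. */
        LockErrorCleanup();

        /* Release standard locks; on abort (allLocks = true), session locks go too. */
        LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);

        /* Release transaction-level advisory locks. */
        LockReleaseAll(USER_LOCKMETHOD, false);
    }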
    2414 ECB             : /*
    2415                 :  * LockReleaseSession -- Release all session locks of the specified lock method
    2416                 :  *      that are held by the current process.
    2417                 :  */
    2418                 : void
    2419 GIC         119 : LockReleaseSession(LOCKMETHODID lockmethodid)
    2420                 : {
    2421 ECB             :     HASH_SEQ_STATUS status;
    2422                 :     LOCALLOCK  *locallock;
    2423                 : 
    2424 CBC         119 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2425 UIC           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2426                 : 
    2427 GIC         119 :     hash_seq_init(&status, LockMethodLocalHash);
    2428                 : 
    2429             226 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2430 ECB             :     {
    2431                 :         /* Ignore items that are not of the specified lock method */
    2432 GIC         107 :         if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
    2433 CBC          10 :             continue;
    2434                 : 
    2435 GIC          97 :         ReleaseLockIfHeld(locallock, true);
    2436                 :     }
    2437             119 : }
    2438                 : 
    2439                 : /*
    2440                 :  * LockReleaseCurrentOwner
    2441                 :  *      Release all locks belonging to CurrentResourceOwner
    2442                 :  *
    2443                 :  * If the caller knows what those locks are, it can pass them as an array.
    2444                 :  * That speeds up the call significantly, when a lot of locks are held.
    2445                 :  * That speeds up the call significantly when a lot of locks are held.
    2446                 :  * table to find them.
    2447                 :  */
    2448                 : void
    2449 CBC        4600 : LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
    2450                 : {
    2451 GIC        4600 :     if (locallocks == NULL)
    2452                 :     {
    2453                 :         HASH_SEQ_STATUS status;
    2454                 :         LOCALLOCK  *locallock;
    2455                 : 
    2456 CBC           4 :         hash_seq_init(&status, LockMethodLocalHash);
    2457 ECB             : 
    2458 GIC         265 :         while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2459 CBC         261 :             ReleaseLockIfHeld(locallock, false);
    2460                 :     }
    2461                 :     else
    2462 ECB             :     {
    2463                 :         int         i;
    2464                 : 
    2465 CBC        7039 :         for (i = nlocks - 1; i >= 0; i--)
    2466 GIC        2443 :             ReleaseLockIfHeld(locallocks[i], false);
    2467 ECB             :     }
    2468 CBC        4600 : }
    2469                 : 
    2470                 : /*
    2471                 :  * ReleaseLockIfHeld
    2472                 :  *      Release any session-level locks on this lockable object if sessionLock
    2473                 :  *      is true; else, release any locks held by CurrentResourceOwner.
    2474 ECB             :  *
    2475                 :  * It is tempting to pass this a ResourceOwner pointer (or NULL for session
    2476                 :  * locks), but without refactoring LockRelease() we cannot support releasing
    2477                 :  * locks belonging to resource owners other than CurrentResourceOwner.
    2478                 :  * If we were to refactor, it'd be a good idea to fix it so we don't have to
    2479                 :  * do a hashtable lookup of the locallock, too.  However, currently this
    2480 EUB             :  * function isn't used heavily enough to justify refactoring for its
    2481                 :  * convenience.
    2482                 :  */
    2483                 : static void
    2484 CBC        2801 : ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
    2485                 : {
    2486 ECB             :     ResourceOwner owner;
    2487                 :     LOCALLOCKOWNER *lockOwners;
    2488                 :     int         i;
    2489                 : 
    2490                 :     /* Identify owner for lock (must match LockRelease!) */
    2491 GBC        2801 :     if (sessionLock)
    2492 GIC          97 :         owner = NULL;
    2493 ECB             :     else
    2494 GIC        2704 :         owner = CurrentResourceOwner;
    2495                 : 
    2496 ECB             :     /* Scan to see if there are any locks belonging to the target owner */
    2497 GIC        2801 :     lockOwners = locallock->lockOwners;
    2498            2991 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2499                 :     {
    2500            2801 :         if (lockOwners[i].owner == owner)
    2501                 :         {
    2502            2611 :             Assert(lockOwners[i].nLocks > 0);
    2503            2611 :             if (lockOwners[i].nLocks < locallock->nLocks)
    2504                 :             {
    2505                 :                 /*
    2506                 :                  * We will still hold this lock after forgetting this
    2507                 :                  * ResourceOwner.
    2508                 :                  */
    2509 CBC         669 :                 locallock->nLocks -= lockOwners[i].nLocks;
    2510                 :                 /* compact out unused slot */
    2511             669 :                 locallock->numLockOwners--;
    2512 GIC         669 :                 if (owner != NULL)
    2513 CBC         669 :                     ResourceOwnerForgetLock(owner, locallock);
    2514 GIC         669 :                 if (i < locallock->numLockOwners)
    2515 LBC           0 :                     lockOwners[i] = lockOwners[locallock->numLockOwners];
    2516                 :             }
    2517                 :             else
    2518                 :             {
    2519 GIC        1942 :                 Assert(lockOwners[i].nLocks == locallock->nLocks);
    2520 ECB             :                 /* We want to call LockRelease just once */
    2521 GIC        1942 :                 lockOwners[i].nLocks = 1;
    2522 CBC        1942 :                 locallock->nLocks = 1;
    2523            1942 :                 if (!LockRelease(&locallock->tag.lock,
    2524                 :                                  locallock->tag.mode,
    2525                 :                                  sessionLock))
    2526 UIC           0 :                     elog(WARNING, "ReleaseLockIfHeld: failed??");
    2527                 :             }
    2528 GIC        2611 :             break;
    2529 ECB             :         }
    2530                 :     }
    2531 GIC        2801 : }
    2532 ECB             : 
    2533                 : /*
    2534                 :  * LockReassignCurrentOwner
    2535                 :  *      Reassign all locks belonging to CurrentResourceOwner to belong
    2536                 :  *      to its parent resource owner.
    2537                 :  *
    2538                 :  * If the caller knows what those locks are, it can pass them as an array.
    2539                 :  * That speeds up the call significantly when a lot of locks are held
    2540                 :  * (e.g. pg_dump with a large schema).  Otherwise, pass NULL for locallocks,
    2541                 :  * and we'll traverse through our hash table to find them.
    2542                 :  */
    2543                 : void
    2544 CBC      482124 : LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
    2545                 : {
    2546 GIC      482124 :     ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
    2547                 : 
    2548          482124 :     Assert(parent != NULL);
    2549                 : 
    2550 CBC      482124 :     if (locallocks == NULL)
    2551 ECB             :     {
    2552                 :         HASH_SEQ_STATUS status;
    2553                 :         LOCALLOCK  *locallock;
    2554                 : 
    2555 CBC        3491 :         hash_seq_init(&status, LockMethodLocalHash);
    2556 ECB             : 
    2557 GIC       84751 :         while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    2558           81260 :             LockReassignOwner(locallock, parent);
    2559 ECB             :     }
    2560                 :     else
    2561                 :     {
    2562                 :         int         i;
    2563                 : 
    2564 GIC     1079427 :         for (i = nlocks - 1; i >= 0; i--)
    2565 CBC      600794 :             LockReassignOwner(locallocks[i], parent);
    2566 ECB             :     }
    2567 GIC      482124 : }
    2568                 : 
    2569                 : /*
    2570                 :  * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
    2571 ECB             :  * CurrentResourceOwner to its parent.
    2572                 :  */
    2573                 : static void
    2574 CBC      682054 : LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
    2575 ECB             : {
    2576                 :     LOCALLOCKOWNER *lockOwners;
    2577                 :     int         i;
    2578 GIC      682054 :     int         ic = -1;
    2579          682054 :     int         ip = -1;
    2580                 : 
    2581                 :     /*
    2582                 :      * Scan to see if there are any locks belonging to current owner or its
    2583                 :      * parent
    2584                 :      */
    2585 CBC      682054 :     lockOwners = locallock->lockOwners;
    2586 GIC     1491583 :     for (i = locallock->numLockOwners - 1; i >= 0; i--)
    2587                 :     {
    2588 CBC      809529 :         if (lockOwners[i].owner == CurrentResourceOwner)
    2589 GIC      672779 :             ic = i;
    2590          136750 :         else if (lockOwners[i].owner == parent)
    2591 CBC       95098 :             ip = i;
    2592                 :     }
    2593 ECB             : 
    2594 CBC      682054 :     if (ic < 0)
    2595            9275 :         return;                 /* no current locks */
    2596                 : 
    2597          672779 :     if (ip < 0)
    2598 ECB             :     {
    2599                 :         /* Parent has no slot, so just give it the child's slot */
    2600 GIC      586925 :         lockOwners[ic].owner = parent;
    2601          586925 :         ResourceOwnerRememberLock(parent, locallock);
    2602                 :     }
    2603                 :     else
    2604 ECB             :     {
    2605                 :         /* Merge child's count with parent's */
    2606 CBC       85854 :         lockOwners[ip].nLocks += lockOwners[ic].nLocks;
    2607 ECB             :         /* compact out unused slot */
    2608 CBC       85854 :         locallock->numLockOwners--;
    2609           85854 :         if (ic < locallock->numLockOwners)
    2610 GIC         685 :             lockOwners[ic] = lockOwners[locallock->numLockOwners];
    2611                 :     }
    2612          672779 :     ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
    2613 EUB             : }
    2614                 : 
    2615                 : /*
    2616                 :  * FastPathGrantRelationLock
    2617                 :  *      Grant lock using per-backend fast-path array, if there is space.
    2618                 :  */
    2619                 : static bool
    2620 GIC    22096918 : FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
    2621                 : {
    2622 ECB             :     uint32      f;
    2623 GIC    22096918 :     uint32      unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
    2624                 : 
    2625 ECB             :     /* Scan for existing entry for this relid, remembering empty slot. */
    2626 GIC   372805832 :     for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2627 ECB             :     {
    2628 CBC   351647793 :         if (FAST_PATH_GET_BITS(MyProc, f) == 0)
    2629 GIC   304569020 :             unused_slot = f;
    2630 CBC    47078773 :         else if (MyProc->fpRelId[f] == relid)
    2631 ECB             :         {
    2632 GIC      938879 :             Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
    2633 CBC      938879 :             FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
    2634          938879 :             return true;
    2635 ECB             :         }
    2636                 :     }
    2637                 : 
    2638                 :     /* If no existing entry, use any empty slot. */
    2639 CBC    21158039 :     if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
    2640                 :     {
    2641        21158039 :         MyProc->fpRelId[unused_slot] = relid;
    2642 GIC    21158039 :         FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
    2643        21158039 :         ++FastPathLocalUseCount;
    2644        21158039 :         return true;
    2645                 :     }
    2646                 : 
    2647                 :     /* No existing entry, and no empty slot. */
    2648 UIC           0 :     return false;
    2649                 : }
    2650                 : 
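The slot arithmetic above packs one small group of lock-mode bits per fast-path slot into a single per-backend bitmap, with the relation OID held in a parallel array. The standalone program below models that packing outside PostgreSQL, assuming the 16-slot, 3-bits-per-slot layout used here (cf. FP_LOCK_SLOTS_PER_BACKEND and FAST_PATH_BITS_PER_SLOT); the names and values are illustrative only.

    /* Standalone model of the fast-path bit packing; illustrative, not PostgreSQL code. */
    #include <stdint.h>
    #include <stdio.h>

    #define SLOTS          16   /* cf. FP_LOCK_SLOTS_PER_BACKEND */
    #define BITS_PER_SLOT   3   /* cf. FAST_PATH_BITS_PER_SLOT */
    #define SLOT_MASK      ((1u << BITS_PER_SLOT) - 1)

    static uint64_t fpLockBits;         /* 3 mode bits per slot, one word per backend */
    static uint32_t fpRelId[SLOTS];     /* relation OID per slot */

    /* Set one lock-mode bit (modes 1..3) in a slot, like FAST_PATH_SET_LOCKMODE. */
    static void
    set_lockmode(int slot, int lockmode)
    {
        fpLockBits |= UINT64_C(1) << (slot * BITS_PER_SLOT + lockmode - 1);
    }

    /* Fetch the 3 mode bits for a slot, like FAST_PATH_GET_BITS. */
    static unsigned
    get_bits(int slot)
    {
        return (unsigned) ((fpLockBits >> (slot * BITS_PER_SLOT)) & SLOT_MASK);
    }

    int
    main(void)
    {
        fpRelId[4] = 16384;     /* hypothetical relation OID in slot 4 */
        set_lockmode(4, 1);     /* mode 1 (AccessShareLock) taken fast-path */
        printf("slot 4: relid %u, mode bits %u\n", (unsigned) fpRelId[4], get_bits(4));
        return 0;
    }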
    2651                 : /*
    2652 ECB             :  * FastPathUnGrantRelationLock
    2653                 :  *      Release fast-path lock, if present.  Update backend-private local
    2654                 :  *      use count, while we're at it.
    2655                 :  */
    2656                 : static bool
    2657 GIC    22840166 : FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
    2658                 : {
    2659                 :     uint32      f;
    2660        22840166 :     bool        result = false;
    2661                 : 
    2662        22840166 :     FastPathLocalUseCount = 0;
    2663       388282822 :     for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2664                 :     {
    2665 CBC   365442656 :         if (MyProc->fpRelId[f] == relid
    2666 GIC    27758203 :             && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
    2667 ECB             :         {
    2668 GIC    22095587 :             Assert(!result);
    2669        22095587 :             FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
    2670 CBC    22095587 :             result = true;
    2671                 :             /* we continue iterating so as to update FastPathLocalUseCount */
    2672                 :         }
    2673 GIC   365442656 :         if (FAST_PATH_GET_BITS(MyProc, f) != 0)
    2674        52651815 :             ++FastPathLocalUseCount;
    2675                 :     }
    2676        22840166 :     return result;
    2677                 : }
    2678                 : 
    2679                 : /*
    2680                 :  * FastPathTransferRelationLocks
    2681                 :  *      Transfer locks matching the given lock tag from per-backend fast-path
    2682                 :  *      arrays to the shared hash table.
    2683                 :  *
    2684                 :  * Returns true if successful, false if ran out of shared memory.
    2685                 :  */
    2686                 : static bool
    2687 CBC      271615 : FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
    2688                 :                               uint32 hashcode)
    2689 ECB             : {
    2690 CBC      271615 :     LWLock     *partitionLock = LockHashPartitionLock(hashcode);
    2691 GIC      271615 :     Oid         relid = locktag->locktag_field2;
    2692                 :     uint32      i;
    2693 ECB             : 
    2694                 :     /*
    2695                 :      * Every PGPROC that can potentially hold a fast-path lock is present in
    2696                 :      * ProcGlobal->allProcs.  Prepared transactions are not, but any
    2697                 :      * outstanding fast-path locks held by prepared transactions are
    2698                 :      * transferred to the main lock table.
    2699                 :      */
    2700 GIC    31018920 :     for (i = 0; i < ProcGlobal->allProcCount; i++)
    2701                 :     {
    2702 CBC    30747305 :         PGPROC     *proc = &ProcGlobal->allProcs[i];
    2703 ECB             :         uint32      f;
    2704                 : 
    2705 CBC    30747305 :         LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
    2706                 : 
    2707                 :         /*
    2708                 :          * If the target backend isn't referencing the same database as the
    2709 ECB             :          * lock, then we needn't examine the individual relation IDs at all;
    2710                 :          * none of them can be relevant.
    2711                 :          *
    2712                 :          * proc->databaseId is set at backend startup time and never changes
    2713                 :          * thereafter, so it might be safe to perform this test before
    2714                 :          * acquiring &proc->fpInfoLock.  In particular, it's certainly safe to
    2715 EUB             :          * assume that if the target backend holds any fast-path locks, it
    2716                 :          * must have performed a memory-fencing operation (in particular, an
    2717                 :          * LWLock acquisition) since setting proc->databaseId.  However, it's
    2718                 :          * less clear that our backend is certain to have performed a memory
    2719 ECB             :          * fencing operation since the other backend set proc->databaseId.  So
    2720                 :          * for now, we test it after acquiring the LWLock just to be safe.
    2721                 :          */
    2722 CBC    30747305 :         if (proc->databaseId != locktag->locktag_field1)
    2723                 :         {
    2724 GIC    23150998 :             LWLockRelease(&proc->fpInfoLock);
    2725 CBC    23150998 :             continue;
    2726                 :         }
    2727 ECB             : 
    2728 GIC   129136080 :         for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2729 ECB             :         {
    2730                 :             uint32      lockmode;
    2731                 : 
    2732                 :             /* Look for an allocated slot matching the given relid. */
    2733 GIC   121540687 :             if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
    2734       121539773 :                 continue;
    2735                 : 
    2736                 :             /* Find or create lock object. */
    2737             914 :             LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2738             914 :             for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
    2739            3656 :                  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
    2740 CBC        2742 :                  ++lockmode)
    2741                 :             {
    2742 ECB             :                 PROCLOCK   *proclock;
    2743                 : 
    2744 CBC        2742 :                 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
    2745            1776 :                     continue;
    2746             966 :                 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
    2747                 :                                             hashcode, lockmode);
    2748 GIC         966 :                 if (!proclock)
    2749 ECB             :                 {
    2750 UIC           0 :                     LWLockRelease(partitionLock);
    2751 LBC           0 :                     LWLockRelease(&proc->fpInfoLock);
    2752 UIC           0 :                     return false;
    2753                 :                 }
    2754 GIC         966 :                 GrantLock(proclock->tag.myLock, proclock, lockmode);
    2755             966 :                 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
    2756 ECB             :             }
    2757 CBC         914 :             LWLockRelease(partitionLock);
    2758                 : 
    2759                 :             /* No need to examine remaining slots. */
    2760             914 :             break;
    2761 ECB             :         }
    2762 GBC     7596307 :         LWLockRelease(&proc->fpInfoLock);
    2763                 :     }
    2764 GIC      271615 :     return true;
    2765 ECB             : }
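
The transfer loop above treats the per-backend fast-path array as a small, fixed set of relation slots whose held lock modes are packed into one bitmask word; FAST_PATH_GET_BITS, FAST_PATH_CHECK_LOCKMODE and FAST_PATH_CLEAR_LOCKMODE read and clear individual mode bits within a slot. The following standalone sketch shows the shape of such an encoding; the slot count, bits-per-slot and lock-mode numbering are illustrative assumptions for the example, not lock.c's definitions.

    /*
     * Illustrative sketch only: constants below are assumptions, not the
     * values used by lock.c.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SLOTS             16        /* cf. FP_LOCK_SLOTS_PER_BACKEND */
    #define BITS_PER_SLOT      3        /* cf. FAST_PATH_BITS_PER_SLOT */
    #define LOCKNUMBER_OFFSET  1        /* cf. FAST_PATH_LOCKNUMBER_OFFSET */
    #define SLOT_MASK          ((UINT64_C(1) << BITS_PER_SLOT) - 1)

    typedef struct
    {
        uint32_t relid[SLOTS];          /* which relation each slot covers */
        uint64_t lockbits;              /* BITS_PER_SLOT mode bits per slot */
    } FastPathArray;

    /* All mode bits set in slot n; 0 means the slot holds nothing. */
    static uint64_t
    get_bits(const FastPathArray *fp, int n)
    {
        return (fp->lockbits >> (BITS_PER_SLOT * n)) & SLOT_MASK;
    }

    static void
    set_lockmode(FastPathArray *fp, int n, int mode)
    {
        fp->lockbits |= UINT64_C(1) << (mode - LOCKNUMBER_OFFSET + BITS_PER_SLOT * n);
    }

    static void
    clear_lockmode(FastPathArray *fp, int n, int mode)
    {
        fp->lockbits &= ~(UINT64_C(1) << (mode - LOCKNUMBER_OFFSET + BITS_PER_SLOT * n));
    }

    int
    main(void)
    {
        FastPathArray fp = {{0}, 0};

        fp.relid[2] = 16384;            /* pretend relation OID in slot 2 */
        set_lockmode(&fp, 2, 1);        /* a weak mode taken via the fast path */
        clear_lockmode(&fp, 2, 1);      /* what the transfer loop does per mode */
        printf("slot 2 bits after transfer: %llx\n",
               (unsigned long long) get_bits(&fp, 2));
        return 0;
    }
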
    2766                 : 
    2767                 : /*
    2768                 :  * FastPathGetRelationLockEntry
    2769                 :  *      Return the PROCLOCK for a lock originally taken via the fast-path,
    2770                 :  *      transferring it to the primary lock table if necessary.
    2771 EUB             :  *
    2772                 :  * Note: caller takes care of updating the locallock object.
    2773                 :  */
    2774                 : static PROCLOCK *
    2775 GIC         373 : FastPathGetRelationLockEntry(LOCALLOCK *locallock)
    2776                 : {
    2777             373 :     LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
    2778 CBC         373 :     LOCKTAG    *locktag = &locallock->tag.lock;
    2779             373 :     PROCLOCK   *proclock = NULL;
    2780 GIC         373 :     LWLock     *partitionLock = LockHashPartitionLock(locallock->hashcode);
    2781 CBC         373 :     Oid         relid = locktag->locktag_field2;
    2782                 :     uint32      f;
    2783                 : 
    2784             373 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    2785                 : 
    2786 GIC        5956 :     for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2787 ECB             :     {
    2788                 :         uint32      lockmode;
    2789                 : 
    2790                 :         /* Look for an allocated slot matching the given relid. */
    2791 GIC        5948 :         if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
    2792            5583 :             continue;
    2793                 : 
    2794                 :         /* If we don't have a lock of the given mode, forget it! */
    2795             365 :         lockmode = locallock->tag.mode;
    2796 CBC         365 :         if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
    2797 UIC           0 :             break;
    2798 ECB             : 
    2799                 :         /* Find or create lock object. */
    2800 GIC         365 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    2801                 : 
    2802             365 :         proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
    2803 ECB             :                                     locallock->hashcode, lockmode);
    2804 GBC         365 :         if (!proclock)
    2805                 :         {
    2806 LBC           0 :             LWLockRelease(partitionLock);
    2807               0 :             LWLockRelease(&MyProc->fpInfoLock);
    2808 UIC           0 :             ereport(ERROR,
    2809 ECB             :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    2810                 :                      errmsg("out of shared memory"),
    2811                 :                      errhint("You might need to increase max_locks_per_transaction.")));
    2812                 :         }
    2813 GIC         365 :         GrantLock(proclock->tag.myLock, proclock, lockmode);
    2814             365 :         FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
    2815                 : 
    2816 CBC         365 :         LWLockRelease(partitionLock);
    2817 EUB             : 
    2818 ECB             :         /* No need to examine remaining slots. */
    2819 GIC         365 :         break;
    2820                 :     }
    2821 ECB             : 
    2822 GIC         373 :     LWLockRelease(&MyProc->fpInfoLock);
    2823                 : 
    2824                 :     /* Lock may have already been transferred by some other backend. */
    2825             373 :     if (proclock == NULL)
    2826                 :     {
    2827                 :         LOCK       *lock;
    2828                 :         PROCLOCKTAG proclocktag;
    2829                 :         uint32      proclock_hashcode;
    2830                 : 
    2831               8 :         LWLockAcquire(partitionLock, LW_SHARED);
    2832                 : 
    2833               8 :         lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    2834                 :                                                     locktag,
    2835                 :                                                     locallock->hashcode,
    2836                 :                                                     HASH_FIND,
    2837                 :                                                     NULL);
    2838               8 :         if (!lock)
    2839 UIC           0 :             elog(ERROR, "failed to re-find shared lock object");
    2840                 : 
    2841 GIC           8 :         proclocktag.myLock = lock;
    2842               8 :         proclocktag.myProc = MyProc;
    2843                 : 
    2844 CBC           8 :         proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
    2845                 :         proclock = (PROCLOCK *)
    2846 GIC           8 :             hash_search_with_hash_value(LockMethodProcLockHash,
    2847                 :                                         &proclocktag,
    2848                 :                                         proclock_hashcode,
    2849                 :                                         HASH_FIND,
    2850                 :                                         NULL);
    2851               8 :         if (!proclock)
    2852 UIC           0 :             elog(ERROR, "failed to re-find shared proclock object");
    2853 GIC           8 :         LWLockRelease(partitionLock);
    2854                 :     }
    2855 ECB             : 
    2856 CBC         373 :     return proclock;
    2857                 : }
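
ProcLockHashCode() above derives the PROCLOCK's hash from the LOCK's hash code in a way that keeps both entries in the same partition, so one partition LWLock covers them (see also the note in PostPrepare_Locks further down, "the updated proclock hash key will still belong to the same hash partition"). Below is a standalone sketch of one way to obtain that property; the partition count and the exact mixing step are assumptions for the example, not the definitions used by lock.c.

    #include <stdint.h>
    #include <stdio.h>

    #define LOG2_PARTITIONS 4
    #define NUM_PARTITIONS  (1u << LOG2_PARTITIONS)

    /* The partition (and hence the LWLock) is chosen from the low-order
     * bits of the lock tag's hash code. */
    static uint32_t
    partition_of(uint32_t hashcode)
    {
        return hashcode % NUM_PARTITIONS;
    }

    /* Mix the owning process into the hash only above the partition bits,
     * so a proclock always hashes into the same partition as its lock. */
    static uint32_t
    proclock_hashcode(uint32_t lock_hashcode, uintptr_t proc)
    {
        return lock_hashcode ^ (((uint32_t) proc) << LOG2_PARTITIONS);
    }

    int
    main(void)
    {
        uint32_t  lock_hash = 0x9e3779b9u;  /* pretend LockTagHashCode() result */
        uintptr_t proc      = 0x12345678;   /* pretend process identity */
        uint32_t  pl_hash   = proclock_hashcode(lock_hash, proc);

        printf("lock partition %u, proclock partition %u\n",
               partition_of(lock_hash), partition_of(pl_hash));
        return 0;
    }
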
    2858 ECB             : 
    2859 EUB             : /*
    2860 ECB             :  * GetLockConflicts
    2861                 :  *      Get an array of VirtualTransactionIds of xacts currently holding locks
    2862 EUB             :  *      that would conflict with the specified lock/lockmode.
    2863                 :  *      xacts merely awaiting such a lock are NOT reported.
    2864                 :  *
    2865                 :  * The result array is palloc'd and is terminated with an invalid VXID.
    2866                 :  * *countp, if not null, is updated to the number of items set.
    2867                 :  *
    2868                 :  * Of course, the result could be out of date by the time it's returned, so
    2869 ECB             :  * use of this function has to be thought about carefully.  Similarly, a
    2870                 :  * PGPROC with no "lxid" will be considered non-conflicting regardless of any
    2871                 :  * lock it holds.  Existing callers don't care about a locker after that
    2872                 :  * locker's pg_xact updates complete.  CommitTransaction() clears "lxid" after
    2873                 :  * pg_xact updates and before releasing locks.
    2874                 :  *
    2875                 :  * Note we never include the current xact's vxid in the result array,
    2876                 :  * since an xact never blocks itself.
    2877                 :  */
    2878                 : VirtualTransactionId *
    2879 CBC        1157 : GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
    2880 ECB             : {
    2881                 :     static VirtualTransactionId *vxids;
    2882 GIC        1157 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    2883 ECB             :     LockMethod  lockMethodTable;
    2884                 :     LOCK       *lock;
    2885                 :     LOCKMASK    conflictMask;
    2886                 :     dlist_iter  proclock_iter;
    2887                 :     PROCLOCK   *proclock;
    2888                 :     uint32      hashcode;
    2889                 :     LWLock     *partitionLock;
    2890 GIC        1157 :     int         count = 0;
    2891            1157 :     int         fast_count = 0;
    2892 ECB             : 
    2893 GIC        1157 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    2894 UIC           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    2895 CBC        1157 :     lockMethodTable = LockMethods[lockmethodid];
    2896 GIC        1157 :     if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
    2897 UIC           0 :         elog(ERROR, "unrecognized lock mode: %d", lockmode);
    2898                 : 
    2899                 :     /*
     2900                 :      * Allocate memory to store results, and fill with InvalidVXID.  We only
     2901                 :      * need enough space for MaxBackends + max_prepared_xacts + a terminator.
     2902                 :      * When InHotStandby, allocate the array once in TopMemoryContext and reuse it.
    2903                 :      */
    2904 GIC        1157 :     if (InHotStandby)
    2905                 :     {
    2906               4 :         if (vxids == NULL)
    2907               1 :             vxids = (VirtualTransactionId *)
    2908 CBC           1 :                 MemoryContextAlloc(TopMemoryContext,
    2909                 :                                    sizeof(VirtualTransactionId) *
    2910               1 :                                    (MaxBackends + max_prepared_xacts + 1));
    2911                 :     }
    2912                 :     else
    2913 GIC        1153 :         vxids = (VirtualTransactionId *)
    2914 CBC        1153 :             palloc0(sizeof(VirtualTransactionId) *
    2915            1153 :                     (MaxBackends + max_prepared_xacts + 1));
    2916                 : 
    2917 ECB             :     /* Compute hash code and partition lock, and look up conflicting modes. */
    2918 GIC        1157 :     hashcode = LockTagHashCode(locktag);
    2919            1157 :     partitionLock = LockHashPartitionLock(hashcode);
    2920            1157 :     conflictMask = lockMethodTable->conflictTab[lockmode];
    2921                 : 
    2922                 :     /*
    2923                 :      * Fast path locks might not have been entered in the primary lock table.
    2924                 :      * If the lock we're dealing with could conflict with such a lock, we must
    2925                 :      * examine each backend's fast-path array for conflicts.
    2926                 :      */
    2927 CBC        1157 :     if (ConflictsWithRelationFastPath(locktag, lockmode))
    2928                 :     {
    2929 ECB             :         int         i;
    2930 CBC        1157 :         Oid         relid = locktag->locktag_field2;
    2931                 :         VirtualTransactionId vxid;
    2932                 : 
    2933 ECB             :         /*
    2934                 :          * Iterate over relevant PGPROCs.  Anything held by a prepared
    2935                 :          * transaction will have been transferred to the primary lock table,
    2936                 :          * so we need not worry about those.  This is all a bit fuzzy, because
    2937                 :          * new locks could be taken after we've visited a particular
    2938                 :          * partition, but the callers had better be prepared to deal with that
    2939                 :          * anyway, since the locks could equally well be taken between the
    2940                 :          * time we return the value and the time the caller does something
    2941                 :          * with it.
    2942                 :          */
    2943 CBC      130311 :         for (i = 0; i < ProcGlobal->allProcCount; i++)
    2944                 :         {
    2945 GIC      129154 :             PGPROC     *proc = &ProcGlobal->allProcs[i];
    2946                 :             uint32      f;
    2947                 : 
    2948                 :             /* A backend never blocks itself */
    2949 CBC      129154 :             if (proc == MyProc)
    2950            1157 :                 continue;
    2951                 : 
    2952 GIC      127997 :             LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
    2953 ECB             : 
    2954                 :             /*
    2955                 :              * If the target backend isn't referencing the same database as
    2956                 :              * the lock, then we needn't examine the individual relation IDs
    2957                 :              * at all; none of them can be relevant.
    2958                 :              *
    2959                 :              * See FastPathTransferRelationLocks() for discussion of why we do
    2960                 :              * this test after acquiring the lock.
    2961                 :              */
    2962 GIC      127997 :             if (proc->databaseId != locktag->locktag_field1)
    2963 ECB             :             {
    2964 GIC       54779 :                 LWLockRelease(&proc->fpInfoLock);
    2965           54779 :                 continue;
    2966                 :             }
    2967                 : 
    2968 CBC     1244429 :             for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
    2969                 :             {
    2970                 :                 uint32      lockmask;
    2971                 : 
    2972                 :                 /* Look for an allocated slot matching the given relid. */
    2973         1171440 :                 if (relid != proc->fpRelId[f])
    2974 GIC     1170442 :                     continue;
    2975 CBC         998 :                 lockmask = FAST_PATH_GET_BITS(proc, f);
    2976 GIC         998 :                 if (!lockmask)
    2977             769 :                     continue;
    2978             229 :                 lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
    2979                 : 
    2980 ECB             :                 /*
    2981                 :                  * There can only be one entry per relation, so if we found it
    2982                 :                  * and it doesn't conflict, we can skip the rest of the slots.
    2983                 :                  */
    2984 GIC         229 :                 if ((lockmask & conflictMask) == 0)
    2985               5 :                     break;
    2986 ECB             : 
    2987                 :                 /* Conflict! */
    2988 CBC         224 :                 GET_VXID_FROM_PGPROC(vxid, *proc);
    2989 ECB             : 
    2990 GBC         224 :                 if (VirtualTransactionIdIsValid(vxid))
    2991 CBC         224 :                     vxids[count++] = vxid;
    2992                 :                 /* else, xact already committed or aborted */
    2993                 : 
    2994                 :                 /* No need to examine remaining slots. */
    2995 GIC         224 :                 break;
    2996                 :             }
    2997 ECB             : 
    2998 GIC       73218 :             LWLockRelease(&proc->fpInfoLock);
    2999 ECB             :         }
    3000                 :     }
    3001                 : 
    3002                 :     /* Remember how many fast-path conflicts we found. */
    3003 CBC        1157 :     fast_count = count;
    3004                 : 
    3005                 :     /*
    3006 ECB             :      * Look up the lock object matching the tag.
    3007                 :      */
    3008 GIC        1157 :     LWLockAcquire(partitionLock, LW_SHARED);
    3009                 : 
    3010 CBC        1157 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    3011                 :                                                 locktag,
    3012 ECB             :                                                 hashcode,
    3013                 :                                                 HASH_FIND,
    3014                 :                                                 NULL);
    3015 GIC        1157 :     if (!lock)
    3016                 :     {
    3017 ECB             :         /*
    3018                 :          * If the lock object doesn't exist, there is nothing holding a lock
    3019 EUB             :          * on this lockable object.
    3020 ECB             :          */
    3021 CBC          70 :         LWLockRelease(partitionLock);
    3022 GIC          70 :         vxids[count].backendId = InvalidBackendId;
    3023              70 :         vxids[count].localTransactionId = InvalidLocalTransactionId;
    3024              70 :         if (countp)
    3025 UIC           0 :             *countp = count;
    3026 GIC          70 :         return vxids;
    3027                 :     }
    3028 ECB             : 
    3029                 :     /*
    3030                 :      * Examine each existing holder (or awaiter) of the lock.
    3031 EUB             :      */
    3032 GNC        2197 :     dlist_foreach(proclock_iter, &lock->procLocks)
    3033                 :     {
    3034            1110 :         proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
    3035                 : 
    3036 GIC        1110 :         if (conflictMask & proclock->holdMask)
    3037                 :         {
    3038            1106 :             PGPROC     *proc = proclock->tag.myProc;
    3039                 : 
    3040                 :             /* A backend never blocks itself */
    3041            1106 :             if (proc != MyProc)
    3042                 :             {
    3043                 :                 VirtualTransactionId vxid;
    3044                 : 
    3045              23 :                 GET_VXID_FROM_PGPROC(vxid, *proc);
    3046                 : 
    3047              23 :                 if (VirtualTransactionIdIsValid(vxid))
    3048 ECB             :                 {
    3049                 :                     int         i;
    3050                 : 
    3051                 :                     /* Avoid duplicate entries. */
    3052 GIC          43 :                     for (i = 0; i < fast_count; ++i)
    3053              20 :                         if (VirtualTransactionIdEquals(vxids[i], vxid))
    3054 UIC           0 :                             break;
    3055 GIC          23 :                     if (i >= fast_count)
    3056              23 :                         vxids[count++] = vxid;
    3057                 :                 }
    3058                 :                 /* else, xact already committed or aborted */
    3059                 :             }
    3060 ECB             :         }
    3061                 :     }
    3062                 : 
    3063 GIC        1087 :     LWLockRelease(partitionLock);
    3064                 : 
    3065 CBC        1087 :     if (count > MaxBackends + max_prepared_xacts)    /* should never happen */
    3066 UIC           0 :         elog(PANIC, "too many conflicting locks found");
    3067                 : 
    3068 GIC        1087 :     vxids[count].backendId = InvalidBackendId;
    3069            1087 :     vxids[count].localTransactionId = InvalidLocalTransactionId;
    3070 CBC        1087 :     if (countp)
    3071 GBC        1084 :         *countp = count;
    3072 GIC        1087 :     return vxids;
    3073                 : }
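
GetLockConflicts reduces conflict detection to a bitmask test: conflictMask is the row of the lock method's conflict table for the requested mode, and a holder conflicts exactly when (holdMask & conflictMask) != 0. A tiny standalone version of that test, using a made-up three-mode conflict table rather than the real, larger one:

    #include <stdio.h>

    enum { MODE_SHARE = 1, MODE_UPDATE = 2, MODE_EXCLUSIVE = 3, NUM_MODES = 4 };
    #define LOCKBIT(m) (1 << (m))

    /* conflict_tab[m] = bitmask of modes that conflict with a request for m
     * (illustrative table, not PostgreSQL's). */
    static const int conflict_tab[NUM_MODES] = {
        0,
        /* MODE_SHARE     */ LOCKBIT(MODE_EXCLUSIVE),
        /* MODE_UPDATE    */ LOCKBIT(MODE_UPDATE) | LOCKBIT(MODE_EXCLUSIVE),
        /* MODE_EXCLUSIVE */ LOCKBIT(MODE_SHARE) | LOCKBIT(MODE_UPDATE)
                             | LOCKBIT(MODE_EXCLUSIVE),
    };

    int
    main(void)
    {
        int held_mask = LOCKBIT(MODE_SHARE) | LOCKBIT(MODE_UPDATE); /* one holder */
        int request   = MODE_EXCLUSIVE;

        if (held_mask & conflict_tab[request])
            printf("holder conflicts with the requested mode\n");
        else
            printf("no conflict\n");
        return 0;
    }
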
    3074                 : 
    3075                 : /*
    3076 ECB             :  * Find a lock in the shared lock table and release it.  It is the caller's
    3077                 :  * responsibility to verify that this is a sane thing to do.  (For example, it
    3078                 :  * would be bad to release a lock here if there might still be a LOCALLOCK
    3079                 :  * object with pointers to it.)
    3080                 :  *
    3081                 :  * We currently use this in two situations: first, to release locks held by
    3082                 :  * prepared transactions on commit (see lock_twophase_postcommit); and second,
    3083                 :  * to release locks taken via the fast-path, transferred to the main hash
    3084                 :  * table, and then released (see LockReleaseAll).
    3085                 :  */
    3086                 : static void
    3087 GBC        2063 : LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
    3088                 :                      LOCKTAG *locktag, LOCKMODE lockmode,
    3089                 :                      bool decrement_strong_lock_count)
    3090                 : {
    3091                 :     LOCK       *lock;
    3092                 :     PROCLOCK   *proclock;
    3093 ECB             :     PROCLOCKTAG proclocktag;
    3094                 :     uint32      hashcode;
    3095                 :     uint32      proclock_hashcode;
    3096 EUB             :     LWLock     *partitionLock;
    3097                 :     bool        wakeupNeeded;
    3098                 : 
    3099 GBC        2063 :     hashcode = LockTagHashCode(locktag);
    3100 GIC        2063 :     partitionLock = LockHashPartitionLock(hashcode);
    3101                 : 
    3102            2063 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    3103                 : 
    3104                 :     /*
    3105 ECB             :      * Re-find the lock object (it had better be there).
    3106                 :      */
    3107 CBC        2063 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    3108                 :                                                 locktag,
    3109                 :                                                 hashcode,
    3110                 :                                                 HASH_FIND,
    3111 ECB             :                                                 NULL);
    3112 GIC        2063 :     if (!lock)
    3113 UIC           0 :         elog(PANIC, "failed to re-find shared lock object");
    3114                 : 
    3115                 :     /*
    3116 ECB             :      * Re-find the proclock object (ditto).
    3117                 :      */
    3118 GIC        2063 :     proclocktag.myLock = lock;
    3119 CBC        2063 :     proclocktag.myProc = proc;
    3120                 : 
    3121            2063 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    3122 ECB             : 
    3123 CBC        2063 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    3124                 :                                                         &proclocktag,
    3125                 :                                                         proclock_hashcode,
    3126                 :                                                         HASH_FIND,
    3127                 :                                                         NULL);
    3128 GIC        2063 :     if (!proclock)
    3129 UIC           0 :         elog(PANIC, "failed to re-find shared proclock object");
    3130                 : 
    3131                 :     /*
    3132                 :      * Double-check that we are actually holding a lock of the type we want to
    3133                 :      * release.
    3134                 :      */
    3135 GIC        2063 :     if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    3136                 :     {
    3137                 :         PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
    3138 UIC           0 :         LWLockRelease(partitionLock);
    3139               0 :         elog(WARNING, "you don't own a lock of type %s",
    3140                 :              lockMethodTable->lockModeNames[lockmode]);
    3141               0 :         return;
    3142                 :     }
    3143                 : 
    3144                 :     /*
    3145                 :      * Do the releasing.  CleanUpLock will waken any now-wakable waiters.
    3146                 :      */
    3147 GIC        2063 :     wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
    3148                 : 
    3149 CBC        2063 :     CleanUpLock(lock, proclock,
    3150                 :                 lockMethodTable, hashcode,
    3151                 :                 wakeupNeeded);
    3152                 : 
    3153 GIC        2063 :     LWLockRelease(partitionLock);
    3154                 : 
    3155                 :     /*
    3156                 :      * Decrement strong lock count.  This logic is needed only for 2PC.
    3157                 :      */
    3158            2063 :     if (decrement_strong_lock_count
    3159             850 :         && ConflictsWithRelationFastPath(locktag, lockmode))
    3160                 :     {
    3161              64 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
    3162                 : 
    3163              64 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    3164 CBC          64 :         Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    3165              64 :         FastPathStrongRelationLocks->count[fasthashcode]--;
    3166              64 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    3167                 :     }
    3168 ECB             : }
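
The tail of LockRefindAndRelease decrements a "strong lock" counter: the lock tag's hash selects one of a fixed number of counters, all protected by a single mutex, and the matching increment is made elsewhere in this file when a fast-path-conflicting lock is first taken. A minimal sketch of that bookkeeping follows; the partition count and the pthread mutex standing in for the spinlock are assumptions for the example.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define STRONG_LOCK_PARTITIONS 1024

    static struct
    {
        pthread_mutex_t mutex;
        uint32_t        count[STRONG_LOCK_PARTITIONS];
    } strong_locks = { PTHREAD_MUTEX_INITIALIZER, {0} };

    static uint32_t
    strong_partition(uint32_t lock_hashcode)
    {
        return lock_hashcode % STRONG_LOCK_PARTITIONS;
    }

    /* Bumped when a strong (fast-path-conflicting) lock is acquired ... */
    static void
    strong_count_inc(uint32_t hashcode)
    {
        pthread_mutex_lock(&strong_locks.mutex);
        strong_locks.count[strong_partition(hashcode)]++;
        pthread_mutex_unlock(&strong_locks.mutex);
    }

    /* ... and dropped again when the lock is finally released. */
    static void
    strong_count_dec(uint32_t hashcode)
    {
        pthread_mutex_lock(&strong_locks.mutex);
        strong_locks.count[strong_partition(hashcode)]--;
        pthread_mutex_unlock(&strong_locks.mutex);
    }

    int
    main(void)
    {
        strong_count_inc(0xdeadbeefu);
        strong_count_dec(0xdeadbeefu);
        printf("count = %u\n", strong_locks.count[strong_partition(0xdeadbeefu)]);
        return 0;
    }
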
    3169                 : 
    3170                 : /*
    3171                 :  * CheckForSessionAndXactLocks
    3172                 :  *      Check to see if transaction holds both session-level and xact-level
    3173                 :  *      locks on the same object; if so, throw an error.
    3174                 :  *
    3175                 :  * If we have both session- and transaction-level locks on the same object,
    3176                 :  * PREPARE TRANSACTION must fail.  This should never happen with regular
    3177                 :  * locks, since we only take those at session level in some special operations
    3178                 :  * like VACUUM.  It's possible to hit this with advisory locks, though.
    3179                 :  *
    3180                 :  * It would be nice if we could keep the session hold and give away the
    3181                 :  * transactional hold to the prepared xact.  However, that would require two
    3182                 :  * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
    3183                 :  * available when it comes time for PostPrepare_Locks to do the deed.
    3184                 :  * So for now, we error out while we can still do so safely.
    3185                 :  *
    3186                 :  * Since the LOCALLOCK table stores a separate entry for each lockmode,
    3187                 :  * we can't implement this check by examining LOCALLOCK entries in isolation.
    3188 EUB             :  * We must build a transient hashtable that is indexed by locktag only.
    3189                 :  */
    3190                 : static void
    3191 CBC         363 : CheckForSessionAndXactLocks(void)
    3192 EUB             : {
    3193                 :     typedef struct
    3194                 :     {
    3195 ECB             :         LOCKTAG     lock;       /* identifies the lockable object */
    3196                 :         bool        sessLock;   /* is any lockmode held at session level? */
    3197                 :         bool        xactLock;   /* is any lockmode held at xact level? */
    3198                 :     } PerLockTagEntry;
    3199                 : 
    3200                 :     HASHCTL     hash_ctl;
    3201                 :     HTAB       *lockhtab;
    3202                 :     HASH_SEQ_STATUS status;
    3203                 :     LOCALLOCK  *locallock;
    3204                 : 
    3205                 :     /* Create a local hash table keyed by LOCKTAG only */
    3206 GIC         363 :     hash_ctl.keysize = sizeof(LOCKTAG);
    3207 CBC         363 :     hash_ctl.entrysize = sizeof(PerLockTagEntry);
    3208 GIC         363 :     hash_ctl.hcxt = CurrentMemoryContext;
    3209                 : 
    3210             363 :     lockhtab = hash_create("CheckForSessionAndXactLocks table",
    3211                 :                            256, /* arbitrary initial size */
    3212                 :                            &hash_ctl,
    3213                 :                            HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
    3214 ECB             : 
    3215                 :     /* Scan local lock table to find entries for each LOCKTAG */
    3216 GIC         363 :     hash_seq_init(&status, LockMethodLocalHash);
    3217                 : 
    3218            1213 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3219                 :     {
    3220             852 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3221 ECB             :         PerLockTagEntry *hentry;
    3222                 :         bool        found;
    3223                 :         int         i;
    3224                 : 
    3225                 :         /*
    3226                 :          * Ignore VXID locks.  We don't want those to be held by prepared
    3227                 :          * transactions, since they aren't meaningful after a restart.
    3228                 :          */
    3229 GIC         852 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3230 UIC           0 :             continue;
    3231                 : 
    3232                 :         /* Ignore it if we don't actually hold the lock */
    3233 GIC         852 :         if (locallock->nLocks <= 0)
    3234 UIC           0 :             continue;
    3235                 : 
    3236                 :         /* Otherwise, find or make an entry in lockhtab */
    3237 CBC         852 :         hentry = (PerLockTagEntry *) hash_search(lockhtab,
    3238 GNC         852 :                                                  &locallock->tag.lock,
    3239                 :                                                  HASH_ENTER, &found);
    3240 GIC         852 :         if (!found)             /* initialize, if newly created */
    3241             806 :             hentry->sessLock = hentry->xactLock = false;
    3242                 : 
    3243 ECB             :         /* Scan to see if we hold lock at session or xact level or both */
    3244 GIC        1704 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3245                 :         {
    3246 CBC         852 :             if (lockOwners[i].owner == NULL)
    3247 GIC           9 :                 hentry->sessLock = true;
    3248 ECB             :             else
    3249 GIC         843 :                 hentry->xactLock = true;
    3250                 :         }
    3251 ECB             : 
    3252                 :         /*
    3253                 :          * We can throw error immediately when we see both types of locks; no
    3254                 :          * need to wait around to see if there are more violations.
    3255                 :          */
    3256 GIC         852 :         if (hentry->sessLock && hentry->xactLock)
    3257               2 :             ereport(ERROR,
    3258                 :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3259                 :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3260 ECB             :     }
    3261                 : 
    3262                 :     /* Success, so clean up */
    3263 GIC         361 :     hash_destroy(lockhtab);
    3264 CBC         361 : }
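
The check above works by collapsing the per-lockmode LOCALLOCK entries into one transient entry per locktag and OR-ing two flags into it. The same idea in a self-contained form, with a fixed-size linear-probe table and integer tags standing in for LOCKTAGs (both simplifications for the example):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_TAGS 256

    typedef struct
    {
        unsigned tag;        /* stands in for the LOCKTAG */
        bool     sessLock;   /* any mode held at session level? */
        bool     xactLock;   /* any mode held at transaction level? */
        bool     used;
    } TagEntry;

    static TagEntry table[MAX_TAGS];

    static TagEntry *
    find_or_make(unsigned tag)
    {
        unsigned i = tag % MAX_TAGS;

        while (table[i].used && table[i].tag != tag)
            i = (i + 1) % MAX_TAGS;     /* linear probing */
        if (!table[i].used)
        {
            table[i].used = true;
            table[i].tag = tag;
        }
        return &table[i];
    }

    int
    main(void)
    {
        /* (tag, is_session_level) pairs, one per local lock hold */
        struct { unsigned tag; bool session; } holds[] = {
            {42, false}, {42, false}, {42, true},   /* both levels on tag 42 */
        };

        for (size_t n = 0; n < sizeof(holds) / sizeof(holds[0]); n++)
        {
            TagEntry *e = find_or_make(holds[n].tag);

            if (holds[n].session)
                e->sessLock = true;
            else
                e->xactLock = true;
            if (e->sessLock && e->xactLock)
            {
                fprintf(stderr, "cannot PREPARE: session-level and "
                        "transaction-level locks on the same object\n");
                return EXIT_FAILURE;
            }
        }
        return EXIT_SUCCESS;
    }
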
    3265 EUB             : 
    3266                 : /*
    3267                 :  * AtPrepare_Locks
    3268 ECB             :  *      Do the preparatory work for a PREPARE: make 2PC state file records
    3269                 :  *      for all locks currently held.
    3270                 :  *
    3271                 :  * Session-level locks are ignored, as are VXID locks.
    3272                 :  *
    3273                 :  * For the most part, we don't need to touch shared memory for this ---
    3274                 :  * all the necessary state information is in the locallock table.
    3275                 :  * Fast-path locks are an exception, however: we move any such locks to
    3276                 :  * the main table before allowing PREPARE TRANSACTION to succeed.
    3277                 :  */
    3278                 : void
    3279 CBC         363 : AtPrepare_Locks(void)
    3280                 : {
    3281                 :     HASH_SEQ_STATUS status;
    3282 ECB             :     LOCALLOCK  *locallock;
    3283 EUB             : 
    3284                 :     /* First, verify there aren't locks of both xact and session level */
    3285 GIC         363 :     CheckForSessionAndXactLocks();
    3286                 : 
    3287                 :     /* Now do the per-locallock cleanup work */
    3288             361 :     hash_seq_init(&status, LockMethodLocalHash);
    3289                 : 
    3290            1208 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3291                 :     {
    3292                 :         TwoPhaseLockRecord record;
    3293 CBC         847 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3294                 :         bool        haveSessionLock;
    3295 ECB             :         bool        haveXactLock;
    3296                 :         int         i;
    3297                 : 
    3298                 :         /*
    3299                 :          * Ignore VXID locks.  We don't want those to be held by prepared
    3300                 :          * transactions, since they aren't meaningful after a restart.
    3301                 :          */
    3302 GIC         847 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3303               7 :             continue;
    3304 ECB             : 
    3305                 :         /* Ignore it if we don't actually hold the lock */
    3306 GIC         847 :         if (locallock->nLocks <= 0)
    3307 UIC           0 :             continue;
    3308                 : 
    3309 ECB             :         /* Scan to see whether we hold it at session or transaction level */
    3310 CBC         847 :         haveSessionLock = haveXactLock = false;
    3311 GIC        1694 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3312 ECB             :         {
    3313 GIC         847 :             if (lockOwners[i].owner == NULL)
    3314               7 :                 haveSessionLock = true;
    3315 ECB             :             else
    3316 GIC         840 :                 haveXactLock = true;
    3317                 :         }
    3318                 : 
    3319                 :         /* Ignore it if we have only session lock */
    3320             847 :         if (!haveXactLock)
    3321               7 :             continue;
    3322                 : 
    3323                 :         /* This can't happen, because we already checked it */
    3324             840 :         if (haveSessionLock)
    3325 UIC           0 :             ereport(ERROR,
    3326                 :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3327                 :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3328                 : 
    3329                 :         /*
    3330                 :          * If the local lock was taken via the fast-path, we need to move it
    3331                 :          * to the primary lock table, or just get a pointer to the existing
    3332                 :          * primary lock table entry if by chance it's already been
    3333 ECB             :          * transferred.
    3334                 :          */
    3335 CBC         840 :         if (locallock->proclock == NULL)
    3336                 :         {
    3337 GIC         373 :             locallock->proclock = FastPathGetRelationLockEntry(locallock);
    3338             373 :             locallock->lock = locallock->proclock->tag.myLock;
    3339                 :         }
    3340                 : 
    3341                 :         /*
    3342                 :          * Arrange to not release any strong lock count held by this lock
    3343                 :          * entry.  We must retain the count until the prepared transaction is
    3344 ECB             :          * committed or rolled back.
    3345                 :          */
    3346 GIC         840 :         locallock->holdsStrongLockCount = false;
    3347                 : 
    3348 ECB             :         /*
    3349                 :          * Create a 2PC record.
    3350                 :          */
    3351 GIC         840 :         memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
    3352             840 :         record.lockmode = locallock->tag.mode;
    3353                 : 
    3354             840 :         RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
    3355                 :                                &record, sizeof(TwoPhaseLockRecord));
    3356                 :     }
    3357             361 : }
    3358                 : 
    3359 ECB             : /*
    3360                 :  * PostPrepare_Locks
    3361                 :  *      Clean up after successful PREPARE
    3362                 :  *
    3363                 :  * Here, we want to transfer ownership of our locks to a dummy PGPROC
    3364                 :  * that's now associated with the prepared transaction, and we want to
    3365                 :  * clean out the corresponding entries in the LOCALLOCK table.
    3366                 :  *
    3367                 :  * Note: by removing the LOCALLOCK entries, we are leaving dangling
    3368                 :  * pointers in the transaction's resource owner.  This is OK at the
    3369                 :  * moment since resowner.c doesn't try to free locks retail at a toplevel
    3370                 :  * transaction commit or abort.  We could alternatively zero out nLocks
    3371                 :  * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
    3372                 :  * but that probably costs more cycles.
    3373                 :  */
    3374 EUB             : void
    3375 GBC         361 : PostPrepare_Locks(TransactionId xid)
    3376 EUB             : {
    3377 GIC         361 :     PGPROC     *newproc = TwoPhaseGetDummyProc(xid, false);
    3378                 :     HASH_SEQ_STATUS status;
    3379                 :     LOCALLOCK  *locallock;
    3380 ECB             :     LOCK       *lock;
    3381 EUB             :     PROCLOCK   *proclock;
    3382                 :     PROCLOCKTAG proclocktag;
    3383                 :     int         partition;
    3384 ECB             : 
    3385                 :     /* Can't prepare a lock group follower. */
    3386 GIC         361 :     Assert(MyProc->lockGroupLeader == NULL ||
    3387 ECB             :            MyProc->lockGroupLeader == MyProc);
    3388                 : 
    3389                 :     /* This is a critical section: any error means big trouble */
    3390 CBC         361 :     START_CRIT_SECTION();
    3391                 : 
    3392                 :     /*
    3393                 :      * First we run through the locallock table and get rid of unwanted
    3394 ECB             :      * entries, then we scan the process's proclocks and transfer them to the
    3395                 :      * target proc.
    3396                 :      *
    3397                 :      * We do this separately because we may have multiple locallock entries
    3398                 :      * pointing to the same proclock, and we daren't end up with any dangling
    3399 EUB             :      * pointers.
    3400                 :      */
    3401 GIC         361 :     hash_seq_init(&status, LockMethodLocalHash);
    3402                 : 
    3403            1208 :     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    3404 ECB             :     {
    3405 CBC         847 :         LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    3406                 :         bool        haveSessionLock;
    3407                 :         bool        haveXactLock;
    3408 ECB             :         int         i;
    3409                 : 
    3410 GIC         847 :         if (locallock->proclock == NULL || locallock->lock == NULL)
    3411                 :         {
    3412                 :             /*
    3413                 :              * We must've run out of shared memory while trying to set up this
    3414 ECB             :              * lock.  Just forget the local entry.
    3415                 :              */
    3416 UIC           0 :             Assert(locallock->nLocks == 0);
    3417 LBC           0 :             RemoveLocalLock(locallock);
    3418 UIC           0 :             continue;
    3419                 :         }
    3420 ECB             : 
    3421                 :         /* Ignore VXID locks */
    3422 GIC         847 :         if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3423 UIC           0 :             continue;
    3424                 : 
    3425                 :         /* Scan to see whether we hold it at session or transaction level */
    3426 GIC         847 :         haveSessionLock = haveXactLock = false;
    3427            1694 :         for (i = locallock->numLockOwners - 1; i >= 0; i--)
    3428                 :         {
    3429             847 :             if (lockOwners[i].owner == NULL)
    3430 CBC           7 :                 haveSessionLock = true;
    3431 ECB             :             else
    3432 GIC         840 :                 haveXactLock = true;
    3433 ECB             :         }
    3434                 : 
    3435                 :         /* Ignore it if we have only session lock */
    3436 GIC         847 :         if (!haveXactLock)
    3437 CBC           7 :             continue;
    3438                 : 
    3439 ECB             :         /* This can't happen, because we already checked it */
    3440 GIC         840 :         if (haveSessionLock)
    3441 LBC           0 :             ereport(PANIC,
    3442                 :                     (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    3443                 :                      errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
    3444 ECB             : 
    3445                 :         /* Mark the proclock to show we need to release this lockmode */
    3446 GIC         840 :         if (locallock->nLocks > 0)
    3447             840 :             locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
    3448                 : 
    3449 ECB             :         /* And remove the locallock hashtable entry */
    3450 CBC         840 :         RemoveLocalLock(locallock);
    3451 ECB             :     }
    3452                 : 
    3453                 :     /*
    3454                 :      * Now, scan each lock partition separately.
    3455                 :      */
    3456 CBC        6137 :     for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
    3457                 :     {
    3458                 :         LWLock     *partitionLock;
    3459 GNC        5776 :         dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
    3460                 :         dlist_mutable_iter proclock_iter;
    3461                 : 
    3462 GIC        5776 :         partitionLock = LockHashPartitionLockByIndex(partition);
    3463                 : 
    3464                 :         /*
    3465                 :          * If the proclock list for this partition is empty, we can skip
    3466                 :          * acquiring the partition lock.  This optimization is safer than the
    3467                 :          * situation in LockReleaseAll, because we got rid of any fast-path
    3468                 :          * locks during AtPrepare_Locks, so there cannot be any case where
    3469                 :          * another backend is adding something to our lists now.  For safety,
    3470                 :          * though, we code this the same way as in LockReleaseAll.
    3471                 :          */
    3472 GNC        5776 :         if (dlist_is_empty(procLocks))
    3473 GIC        4976 :             continue;           /* needn't examine this partition */
    3474 ECB             : 
    3475 GIC         800 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    3476                 : 
    3477 GNC        1638 :         dlist_foreach_modify(proclock_iter, procLocks)
    3478                 :         {
    3479             838 :             proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
    3480 ECB             : 
    3481 CBC         838 :             Assert(proclock->tag.myProc == MyProc);
    3482                 : 
    3483 GIC         838 :             lock = proclock->tag.myLock;
    3484                 : 
    3485                 :             /* Ignore VXID locks */
    3486             838 :             if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
    3487              35 :                 continue;
    3488 ECB             : 
    3489                 :             PROCLOCK_PRINT("PostPrepare_Locks", proclock);
    3490                 :             LOCK_PRINT("PostPrepare_Locks", lock, 0);
    3491 GBC         803 :             Assert(lock->nRequested >= 0);
    3492 GIC         803 :             Assert(lock->nGranted >= 0);
    3493             803 :             Assert(lock->nGranted <= lock->nRequested);
    3494 CBC         803 :             Assert((proclock->holdMask & ~lock->grantMask) == 0);
    3495                 : 
    3496                 :             /* Ignore it if nothing to release (must be a session lock) */
    3497 GIC         803 :             if (proclock->releaseMask == 0)
    3498               7 :                 continue;
    3499 ECB             : 
    3500                 :             /* Else we should be releasing all locks */
    3501 GIC         796 :             if (proclock->releaseMask != proclock->holdMask)
    3502 LBC           0 :                 elog(PANIC, "we seem to have dropped a bit somewhere");
    3503 ECB             : 
    3504                 :             /*
    3505                 :              * We cannot simply modify proclock->tag.myProc to reassign
    3506                 :              * ownership of the lock, because that's part of the hash key and
    3507                 :              * the proclock would then be in the wrong hash chain.  Instead
    3508                 :              * use hash_update_hash_key.  (We used to create a new hash entry,
    3509                 :              * but that risks out-of-memory failure if other processes are
    3510                 :              * busy making proclocks too.)  We must unlink the proclock from
    3511                 :              * our procLink chain and put it into the new proc's chain, too.
    3512                 :              *
    3513                 :              * Note: the updated proclock hash key will still belong to the
    3514                 :              * same hash partition, cf proclock_hash().  So the partition lock
    3515                 :              * we already hold is sufficient for this.
    3516                 :              */
    3517 GNC         796 :             dlist_delete(&proclock->procLink);
    3518                 : 
    3519                 :             /*
    3520 ECB             :              * Create the new hash key for the proclock.
    3521                 :              */
    3522 GIC         796 :             proclocktag.myLock = lock;
    3523             796 :             proclocktag.myProc = newproc;
    3524                 : 
    3525                 :             /*
    3526 ECB             :              * Update groupLeader pointer to point to the new proc.  (We'd
    3527                 :              * better not be a member of somebody else's lock group!)
    3528                 :              */
    3529 GIC         796 :             Assert(proclock->groupLeader == proclock->tag.myProc);
    3530             796 :             proclock->groupLeader = newproc;
    3531                 : 
    3532                 :             /*
    3533                 :              * Update the proclock.  We should not find any existing entry for
    3534                 :              * the same hash key, since there can be only one entry for any
    3535                 :              * given lock with my own proc.
    3536                 :              */
    3537             796 :             if (!hash_update_hash_key(LockMethodProcLockHash,
    3538                 :                                       proclock,
    3539                 :                                       &proclocktag))
    3540 UIC           0 :                 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
    3541                 : 
    3542                 :             /* Re-link into the new proc's proclock list */
    3543 GNC         796 :             dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
    3544                 : 
    3545                 :             PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
    3546 ECB             :         }                       /* loop over PROCLOCKs within this partition */
    3547                 : 
    3548 GIC         800 :         LWLockRelease(partitionLock);
    3549                 :     }                           /* loop over partitions */
    3550                 : 
    3551             361 :     END_CRIT_SECTION();
    3552             361 : }
    3553                 : 
    3554                 : 
    3555 ECB             : /*
    3556                 :  * Estimate shared-memory space used for lock tables
    3557                 :  */
    3558                 : Size
    3559 CBC        2738 : LockShmemSize(void)
    3560 ECB             : {
    3561 GIC        2738 :     Size        size = 0;
    3562                 :     long        max_table_size;
    3563                 : 
    3564                 :     /* lock hash table */
    3565            2738 :     max_table_size = NLOCKENTS();
    3566            2738 :     size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
    3567                 : 
    3568                 :     /* proclock hash table */
    3569            2738 :     max_table_size *= 2;
    3570            2738 :     size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
    3571                 : 
    3572                 :     /*
    3573                 :      * Since NLOCKENTS is only an estimate, add 10% safety margin.
    3574 ECB             :      */
    3575 GIC        2738 :     size = add_size(size, size / 10);
    3576 ECB             : 
    3577 GIC        2738 :     return size;
    3578                 : }
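
The estimate above is: size the lock hash for NLOCKENTS() entries, the proclock hash for twice that, then add 10% slop because NLOCKENTS() is only an estimate. A back-of-the-envelope version of that arithmetic is sketched below; the per-entry sizes, the hash-table overhead factor, and modelling NLOCKENTS() as max_locks_per_xact * (MaxBackends + max_prepared_xacts) are all assumptions of the sketch.

    #include <stddef.h>
    #include <stdio.h>

    static size_t
    hash_estimate(size_t nentries, size_t entrysize)
    {
        /* crude stand-in for hash_estimate_size(): entries plus ~50% overhead */
        return nentries * (entrysize + entrysize / 2);
    }

    int
    main(void)
    {
        long   max_locks_per_xact  = 64;    /* illustrative GUC setting */
        long   MaxBackends         = 100;   /* illustrative */
        long   max_prepared_xacts  = 0;
        size_t lock_entry_size     = 160;   /* assumed sizeof(LOCK) */
        size_t proclock_entry_size = 80;    /* assumed sizeof(PROCLOCK) */

        size_t nlockents = (size_t) max_locks_per_xact
                           * (size_t) (MaxBackends + max_prepared_xacts);
        size_t size = 0;

        size += hash_estimate(nlockents, lock_entry_size);         /* lock hash */
        size += hash_estimate(nlockents * 2, proclock_entry_size); /* proclock hash */
        size += size / 10;                                         /* 10% margin */

        printf("estimated lock-table shared memory: %zu bytes\n", size);
        return 0;
    }
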
    3579 ECB             : 
    3580                 : /*
    3581                 :  * GetLockStatusData - Return a summary of the lock manager's internal
    3582                 :  * status, for use in a user-level reporting function.
    3583                 :  *
    3584                 :  * The return data consists of an array of LockInstanceData objects,
    3585                 :  * which are a lightly abstracted version of the PROCLOCK data structures,
    3586                 :  * i.e. there is one entry for each unique lock and interested PGPROC.
    3587                 :  * It is the caller's responsibility to match up related items (such as
    3588                 :  * references to the same lockable object or PGPROC) if wanted.
    3589                 :  *
    3590                 :  * The design goal is to hold the LWLocks for as short a time as possible;
    3591                 :  * thus, this function simply makes a copy of the necessary data and releases
    3592                 :  * the locks, allowing the caller to contemplate and format the data for as
    3593                 :  * long as it pleases.
    3594                 :  */
    3595                 : LockData *
    3596 GIC         264 : GetLockStatusData(void)
    3597 ECB             : {
    3598                 :     LockData   *data;
    3599                 :     PROCLOCK   *proclock;
    3600                 :     HASH_SEQ_STATUS seqstat;
    3601                 :     int         els;
    3602                 :     int         el;
    3603                 :     int         i;
    3604                 : 
    3605 CBC         264 :     data = (LockData *) palloc(sizeof(LockData));
    3606 ECB             : 
    3607                 :     /* Guess how much space we'll need. */
    3608 GIC         264 :     els = MaxBackends;
    3609             264 :     el = 0;
    3610             264 :     data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
    3611                 : 
    3612 ECB             :     /*
    3613                 :      * First, we iterate through the per-backend fast-path arrays, locking
    3614                 :      * them one at a time.  This might produce an inconsistent picture of the
    3615                 :      * system state, but taking all of those LWLocks at the same time seems
    3616                 :      * impractical (in particular, note MAX_SIMUL_LWLOCKS).  It shouldn't
    3617                 :      * matter too much, because none of these locks can be involved in lock
    3618                 :      * conflicts anyway - anything that might must be present in the main lock
    3619                 :      * table.  (For the same reason, we don't sweat about making leaderPid
    3620                 :      * completely valid.  We cannot safely dereference another backend's
    3621                 :      * lockGroupLeader field without holding all lock partition locks, and
    3622                 :      * it's not worth that.)
    3623                 :      */
    3624 CBC       27652 :     for (i = 0; i < ProcGlobal->allProcCount; ++i)
    3625 ECB             :     {
    3626 CBC       27388 :         PGPROC     *proc = &ProcGlobal->allProcs[i];
    3627                 :         uint32      f;
    3628                 : 
    3629           27388 :         LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
    3630 ECB             : 
    3631 GIC      465596 :         for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
    3632 ECB             :         {
    3633                 :             LockInstanceData *instance;
    3634 CBC      438208 :             uint32      lockbits = FAST_PATH_GET_BITS(proc, f);
    3635 ECB             : 
    3636                 :             /* Skip unallocated slots. */
    3637 CBC      438208 :             if (!lockbits)
    3638          434298 :                 continue;
    3639 ECB             : 
    3640 CBC        3910 :             if (el >= els)
    3641 ECB             :             {
    3642 GIC          15 :                 els += MaxBackends;
    3643 CBC          15 :                 data->locks = (LockInstanceData *)
    3644 GIC          15 :                     repalloc(data->locks, sizeof(LockInstanceData) * els);
    3645                 :             }
    3646 ECB             : 
    3647 GIC        3910 :             instance = &data->locks[el];
    3648            3910 :             SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
    3649                 :                                  proc->fpRelId[f]);
    3650            3910 :             instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
    3651            3910 :             instance->waitLockMode = NoLock;
    3652            3910 :             instance->backend = proc->backendId;
    3653            3910 :             instance->lxid = proc->lxid;
    3654            3910 :             instance->pid = proc->pid;
    3655            3910 :             instance->leaderPid = proc->pid;
    3656            3910 :             instance->fastpath = true;
    3657                 : 
    3658                 :             /*
    3659                 :              * Successfully taking fast path lock means there were no
    3660                 :              * conflicting locks.
    3661                 :              */
    3662 CBC        3910 :             instance->waitStart = 0;
    3663 ECB             : 
    3664 GIC        3910 :             el++;
    3665                 :         }
    3666 ECB             : 
    3667 CBC       27388 :         if (proc->fpVXIDLock)
    3668                 :         {
    3669 ECB             :             VirtualTransactionId vxid;
    3670                 :             LockInstanceData *instance;
    3671                 : 
    3672 GIC        1250 :             if (el >= els)
    3673                 :             {
    3674               3 :                 els += MaxBackends;
    3675 CBC           3 :                 data->locks = (LockInstanceData *)
    3676 GIC           3 :                     repalloc(data->locks, sizeof(LockInstanceData) * els);
    3677 ECB             :             }
    3678                 : 
    3679 CBC        1250 :             vxid.backendId = proc->backendId;
    3680            1250 :             vxid.localTransactionId = proc->fpLocalTransactionId;
    3681 ECB             : 
    3682 GIC        1250 :             instance = &data->locks[el];
    3683 CBC        1250 :             SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
    3684            1250 :             instance->holdMask = LOCKBIT_ON(ExclusiveLock);
    3685            1250 :             instance->waitLockMode = NoLock;
    3686            1250 :             instance->backend = proc->backendId;
    3687 GIC        1250 :             instance->lxid = proc->lxid;
    3688 CBC        1250 :             instance->pid = proc->pid;
    3689            1250 :             instance->leaderPid = proc->pid;
    3690            1250 :             instance->fastpath = true;
    3691            1250 :             instance->waitStart = 0;
    3692 ECB             : 
    3693 CBC        1250 :             el++;
    3694 ECB             :         }
    3695                 : 
    3696 CBC       27388 :         LWLockRelease(&proc->fpInfoLock);
    3697                 :     }
    3698                 : 
    3699                 :     /*
    3700                 :      * Next, acquire lock on the entire shared lock data structure.  We do
    3701                 :      * this so that, at least for locks in the primary lock table, the state
    3702                 :      * will be self-consistent.
    3703                 :      *
    3704                 :      * Since this is a read-only operation, we take shared instead of
    3705                 :      * exclusive lock.  There's not a whole lot of point to this, because all
    3706 ECB             :      * the normal operations require exclusive lock, but it doesn't hurt
    3707                 :      * anything either. It will at least allow two backends to do
    3708                 :      * GetLockStatusData in parallel.
    3709                 :      *
    3710                 :      * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
    3711                 :      */
    3712 GIC        4488 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3713            4224 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3714                 : 
    3715                 :     /* Now we can safely count the number of proclocks */
    3716             264 :     data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
    3717             264 :     if (data->nelements > els)
    3718                 :     {
    3719              13 :         els = data->nelements;
    3720              13 :         data->locks = (LockInstanceData *)
    3721              13 :             repalloc(data->locks, sizeof(LockInstanceData) * els);
    3722                 :     }
    3723                 : 
    3724                 :     /* Now scan the tables to copy the data */
    3725             264 :     hash_seq_init(&seqstat, LockMethodProcLockHash);
    3726                 : 
    3727            2964 :     while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
    3728                 :     {
    3729            2700 :         PGPROC     *proc = proclock->tag.myProc;
    3730            2700 :         LOCK       *lock = proclock->tag.myLock;
    3731            2700 :         LockInstanceData *instance = &data->locks[el];
    3732                 : 
    3733            2700 :         memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
    3734            2700 :         instance->holdMask = proclock->holdMask;
    3735            2700 :         if (proc->waitLock == proclock->tag.myLock)
    3736               8 :             instance->waitLockMode = proc->waitLockMode;
    3737                 :         else
    3738 CBC        2692 :             instance->waitLockMode = NoLock;
    3739 GIC        2700 :         instance->backend = proc->backendId;
    3740            2700 :         instance->lxid = proc->lxid;
    3741            2700 :         instance->pid = proc->pid;
    3742            2700 :         instance->leaderPid = proclock->groupLeader->pid;
    3743            2700 :         instance->fastpath = false;
    3744 CBC        2700 :         instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
    3745                 : 
    3746 GIC        2700 :         el++;
    3747                 :     }
    3748                 : 
    3749                 :     /*
    3750                 :      * And release locks.  We do this in reverse order for two reasons: (1)
    3751                 :      * Anyone else who needs more than one of the locks will be trying to lock
    3752 ECB             :      * them in increasing order; we don't want to release the other process
    3753                 :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    3754                 :      * behavior inside LWLockRelease.
    3755                 :      */
    3756 CBC        4488 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    3757 GIC        4224 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    3758                 : 
    3759             264 :     Assert(el == data->nelements);
    3760                 : 
    3761             264 :     return data;
    3762                 : }
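                          :
                          : /*
                          :  * Usage sketch (illustrative, not part of lock.c; log_lock_summary() is a
                          :  * hypothetical helper).  A consumer such as the pg_locks reporting code
                          :  * walks the returned array roughly like this, using only fields that
                          :  * GetLockStatusData() fills in above.
                          :  */
                          : #ifdef NOT_USED
                          : static void
                          : log_lock_summary(void)
                          : {
                          :     LockData   *data = GetLockStatusData();
                          :     int         i;
                          :
                          :     for (i = 0; i < data->nelements; i++)
                          :     {
                          :         LockInstanceData *instance = &data->locks[i];
                          :
                          :         elog(LOG, "pid %d: holdMask 0x%x, locktag type %u, %s",
                          :              instance->pid,
                          :              (unsigned int) instance->holdMask,
                          :              (unsigned int) instance->locktag.locktag_type,
                          :              instance->fastpath ? "fast-path" : "main lock table");
                          :     }
                          :
                          :     /* data and data->locks are palloc'd in the caller's memory context */
                          : }
                          : #endif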
    3763                 : 
    3764                 : /*
    3765                 :  * GetBlockerStatusData - Return a summary of the lock manager's state
    3766                 :  * concerning locks that are blocking the specified PID or any member of
    3767                 :  * the PID's lock group, for use in a user-level reporting function.
    3768                 :  *
    3769 ECB             :  * For each PID within the lock group that is awaiting some heavyweight lock,
    3770                 :  * the return data includes an array of LockInstanceData objects, which are
    3771                 :  * the same data structure used by GetLockStatusData; but unlike that function,
    3772                 :  * this one reports only the PROCLOCKs associated with the lock that that PID
    3773                 :  * is blocked on.  (Hence, all the locktags should be the same for any one
    3774                 :  * blocked PID.)  In addition, we return an array of the PIDs of those backends
    3775                 :  * that are ahead of the blocked PID in the lock's wait queue.  These can be
    3776                 :  * compared with the PIDs in the LockInstanceData objects to determine which
    3777                 :  * waiters are ahead of or behind the blocked PID in the queue.
    3778                 :  *
    3779                 :  * If blocked_pid isn't a valid backend PID or nothing in its lock group is
    3780                 :  * waiting on any heavyweight lock, return empty arrays.
    3781                 :  *
    3782                 :  * The design goal is to hold the LWLocks for as short a time as possible;
    3783                 :  * thus, this function simply makes a copy of the necessary data and releases
    3784                 :  * the locks, allowing the caller to contemplate and format the data for as
    3785                 :  * long as it pleases.
    3786                 :  */
    3787                 : BlockedProcsData *
    3788 GIC        3192 : GetBlockerStatusData(int blocked_pid)
    3789                 : {
    3790                 :     BlockedProcsData *data;
    3791                 :     PGPROC     *proc;
    3792                 :     int         i;
    3793 ECB             : 
    3794 GIC        3192 :     data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
    3795                 : 
    3796                 :     /*
    3797 ECB             :      * Guess how much space we'll need, and preallocate.  Most of the time
    3798                 :      * this will avoid needing to do repalloc while holding the LWLocks.  (We
    3799                 :      * assume, but check with an Assert, that MaxBackends is enough entries
    3800                 :      * for the procs[] array; the other two could need enlargement, though.)
    3801                 :      */
    3802 GIC        3192 :     data->nprocs = data->nlocks = data->npids = 0;
    3803            3192 :     data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
    3804            3192 :     data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
    3805 CBC        3192 :     data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
    3806            3192 :     data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
    3807                 : 
    3808 ECB             :     /*
    3809                 :      * In order to search the ProcArray for blocked_pid and assume that that
    3810                 :      * entry won't immediately disappear under us, we must hold ProcArrayLock.
    3811                 :      * In addition, to examine the lock grouping fields of any other backend,
    3812                 :      * we must hold all the hash partition locks.  (Only one of those locks is
    3813                 :      * actually relevant for any one lock group, but we can't know which one
    3814                 :      * ahead of time.)  It's fairly annoying to hold all those locks
    3815                 :      * throughout this, but it's no worse than GetLockStatusData(), and it
    3816                 :      * does have the advantage that we're guaranteed to return a
    3817                 :      * self-consistent instantaneous state.
    3818                 :      */
    3819 GIC        3192 :     LWLockAcquire(ProcArrayLock, LW_SHARED);
    3820 ECB             : 
    3821 GIC        3192 :     proc = BackendPidGetProcWithLock(blocked_pid);
    3822                 : 
    3823                 :     /* Nothing to do if it's gone */
    3824            3192 :     if (proc != NULL)
    3825                 :     {
    3826                 :         /*
    3827                 :          * Acquire lock on the entire shared lock data structure.  See notes
    3828 ECB             :          * in GetLockStatusData().
    3829                 :          */
    3830 GIC       54264 :         for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3831           51072 :             LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3832 ECB             : 
    3833 CBC        3192 :         if (proc->lockGroupLeader == NULL)
    3834 ECB             :         {
    3835                 :             /* Easy case, proc is not a lock group member */
    3836 GIC        3035 :             GetSingleProcBlockerStatusData(proc, data);
    3837                 :         }
    3838                 :         else
    3839                 :         {
    3840                 :             /* Examine all procs in proc's lock group */
    3841                 :             dlist_iter  iter;
    3842                 : 
    3843 CBC         404 :             dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
    3844                 :             {
    3845 ECB             :                 PGPROC     *memberProc;
    3846                 : 
    3847 CBC         247 :                 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
    3848             247 :                 GetSingleProcBlockerStatusData(memberProc, data);
    3849                 :             }
    3850                 :         }
    3851 ECB             : 
    3852                 :         /*
    3853 EUB             :          * And release locks.  See notes in GetLockStatusData().
    3854                 :          */
    3855 GBC       54264 :         for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    3856 GIC       51072 :             LWLockRelease(LockHashPartitionLockByIndex(i));
    3857                 : 
    3858 CBC        3192 :         Assert(data->nprocs <= data->maxprocs);
    3859 ECB             :     }
    3860                 : 
    3861 CBC        3192 :     LWLockRelease(ProcArrayLock);
    3862 ECB             : 
    3863 GIC        3192 :     return data;
    3864 ECB             : }
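                          :
                          : /*
                          :  * Usage sketch (illustrative, not part of lock.c; log_blockers() is a
                          :  * hypothetical helper).  The per-proc bookkeeping set up below
                          :  * (first_lock, num_locks, first_waiter, num_waiters) gives each blocked
                          :  * proc a slice of the shared locks[] and waiter_pids[] arrays, which a
                          :  * consumer walks roughly like this.
                          :  */
                          : #ifdef NOT_USED
                          : static void
                          : log_blockers(int blocked_pid)
                          : {
                          :     BlockedProcsData *data = GetBlockerStatusData(blocked_pid);
                          :     int         i;
                          :
                          :     for (i = 0; i < data->nprocs; i++)
                          :     {
                          :         BlockedProcData *bproc = &data->procs[i];
                          :         int         j;
                          :
                          :         /* PROCLOCKs on the lock this (group member) proc is waiting for */
                          :         for (j = 0; j < bproc->num_locks; j++)
                          :             elog(LOG, "pid %d is blocked on a lock involving pid %d",
                          :                  bproc->pid, data->locks[bproc->first_lock + j].pid);
                          :
                          :         /* PIDs queued ahead of this proc in the lock's wait queue */
                          :         for (j = 0; j < bproc->num_waiters; j++)
                          :             elog(LOG, "pid %d is queued behind pid %d",
                          :                  bproc->pid, data->waiter_pids[bproc->first_waiter + j]);
                          :     }
                          : }
                          : #endif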
    3865                 : 
    3866                 : /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
    3867                 : static void
    3868 CBC        3282 : GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
    3869 ECB             : {
    3870 CBC        3282 :     LOCK       *theLock = blocked_proc->waitLock;
    3871                 :     BlockedProcData *bproc;
    3872                 :     dlist_iter  proclock_iter;
    3873                 :     dlist_iter  proc_iter;
    3874                 :     dclist_head *waitQueue;
    3875                 :     int         queue_size;
    3876                 : 
    3877 EUB             :     /* Nothing to do if this proc is not blocked */
    3878 GIC        3282 :     if (theLock == NULL)
    3879 GBC        2193 :         return;
    3880 EUB             : 
    3881                 :     /* Set up a procs[] element */
    3882 GIC        1089 :     bproc = &data->procs[data->nprocs++];
    3883            1089 :     bproc->pid = blocked_proc->pid;
    3884 CBC        1089 :     bproc->first_lock = data->nlocks;
    3885 GIC        1089 :     bproc->first_waiter = data->npids;
    3886 ECB             : 
    3887                 :     /*
    3888                 :      * We may ignore the proc's fast-path arrays, since nothing in those could
    3889                 :      * be related to a contended lock.
    3890                 :      */
    3891                 : 
    3892                 :     /* Collect all PROCLOCKs associated with theLock */
    3893 GNC        3312 :     dlist_foreach(proclock_iter, &theLock->procLocks)
    3894                 :     {
    3895            2223 :         PROCLOCK   *proclock =
    3896            2223 :             dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
    3897 GIC        2223 :         PGPROC     *proc = proclock->tag.myProc;
    3898            2223 :         LOCK       *lock = proclock->tag.myLock;
    3899                 :         LockInstanceData *instance;
    3900                 : 
    3901            2223 :         if (data->nlocks >= data->maxlocks)
    3902                 :         {
    3903 UIC           0 :             data->maxlocks += MaxBackends;
    3904               0 :             data->locks = (LockInstanceData *)
    3905               0 :                 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
    3906                 :         }
    3907                 : 
    3908 GIC        2223 :         instance = &data->locks[data->nlocks];
    3909 CBC        2223 :         memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
    3910 GIC        2223 :         instance->holdMask = proclock->holdMask;
    3911            2223 :         if (proc->waitLock == lock)
    3912            1126 :             instance->waitLockMode = proc->waitLockMode;
    3913                 :         else
    3914            1097 :             instance->waitLockMode = NoLock;
    3915            2223 :         instance->backend = proc->backendId;
    3916            2223 :         instance->lxid = proc->lxid;
    3917            2223 :         instance->pid = proc->pid;
    3918            2223 :         instance->leaderPid = proclock->groupLeader->pid;
    3919            2223 :         instance->fastpath = false;
    3920            2223 :         data->nlocks++;
    3921 ECB             :     }
    3922                 : 
    3923                 :     /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
    3924 CBC        1089 :     waitQueue = &(theLock->waitProcs);
    3925 GNC        1089 :     queue_size = dclist_count(waitQueue);
    3926                 : 
    3927 GIC        1089 :     if (queue_size > data->maxpids - data->npids)
    3928                 :     {
    3929 UIC           0 :         data->maxpids = Max(data->maxpids + MaxBackends,
    3930 ECB             :                             data->npids + queue_size);
    3931 UIC           0 :         data->waiter_pids = (int *) repalloc(data->waiter_pids,
    3932               0 :                                              sizeof(int) * data->maxpids);
    3933 ECB             :     }
    3934                 : 
    3935                 :     /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
    3936 GNC        1107 :     dclist_foreach(proc_iter, waitQueue)
    3937                 :     {
    3938            1107 :         PGPROC     *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
    3939            1107 :         if (queued_proc == blocked_proc)
    3940 GIC        1089 :             break;
    3941 GNC          18 :         data->waiter_pids[data->npids++] = queued_proc->pid;
    3942              18 :         queued_proc = (PGPROC *) queued_proc->links.next;
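                          :         /*
                          :          * Note: the preceding assignment has no effect here; dclist_foreach()
                          :          * advances proc_iter itself, and queued_proc is re-derived from
                          :          * proc_iter.cur at the top of each iteration.
                          :          */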
    3943 ECB             :     }
    3944                 : 
    3945 GIC        1089 :     bproc->num_locks = data->nlocks - bproc->first_lock;
    3946 CBC        1089 :     bproc->num_waiters = data->npids - bproc->first_waiter;
    3947 ECB             : }
    3948                 : 
    3949                 : /*
    3950                 :  * Returns a list of currently held AccessExclusiveLocks, for use by
    3951                 :  * LogStandbySnapshot().  The result is a palloc'd array,
    3952                 :  * with the number of elements returned into *nlocks.
    3953                 :  *
    3954                 :  * XXX This currently takes a lock on all partitions of the lock table,
    3955                 :  * but it's possible to do better.  By reference counting locks and storing
    3956                 :  * the value in the ProcArray entry for each backend we could tell if any
    3957                 :  * locks need recording without having to acquire the partition locks and
    3958                 :  * scan the lock table.  Whether that's worth the additional overhead
    3959                 :  * is pretty dubious though.
    3960                 :  */
    3961                 : xl_standby_lock *
    3962 CBC        1749 : GetRunningTransactionLocks(int *nlocks)
    3963 ECB             : {
    3964                 :     xl_standby_lock *accessExclusiveLocks;
    3965                 :     PROCLOCK   *proclock;
    3966                 :     HASH_SEQ_STATUS seqstat;
    3967                 :     int         i;
    3968                 :     int         index;
    3969                 :     int         els;
    3970                 : 
    3971                 :     /*
    3972                 :      * Acquire lock on the entire shared lock data structure.
    3973                 :      *
    3974                 :      * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
    3975                 :      */
    3976 GIC       29733 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    3977           27984 :         LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
    3978                 : 
    3979 ECB             :     /* Now we can safely count the number of proclocks */
    3980 CBC        1749 :     els = hash_get_num_entries(LockMethodProcLockHash);
    3981                 : 
    3982 ECB             :     /*
    3983                 :      * Allocating enough space for all locks in the lock table is overkill,
    3984                 :      * but it's more convenient and faster than having to enlarge the array.
    3985                 :      */
    3986 GIC        1749 :     accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
    3987                 : 
    3988 ECB             :     /* Now scan the tables to copy the data */
    3989 GIC        1749 :     hash_seq_init(&seqstat, LockMethodProcLockHash);
    3990 ECB             : 
    3991                 :     /*
    3992                 :      * If lock is a currently granted AccessExclusiveLock then it will have
    3993                 :      * just one proclock holder, so locks are never accessed twice in this
    3994                 :      * particular case. Don't copy this code for use elsewhere because in the
    3995                 :      * general case this will give you duplicate locks when looking at
    3996                 :      * non-exclusive lock types.
    3997                 :      */
    3998 GIC        1749 :     index = 0;
    3999            5840 :     while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
    4000                 :     {
    4001                 :         /* make sure this definition matches the one used in LockAcquire */
    4002            4091 :         if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
    4003             160 :             proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
    4004                 :         {
    4005             115 :             PGPROC     *proc = proclock->tag.myProc;
    4006             115 :             LOCK       *lock = proclock->tag.myLock;
    4007             115 :             TransactionId xid = proc->xid;
    4008                 : 
    4009                 :             /*
    4010                 :              * Don't record locks for transactions if we know they have
    4011                 :              * already issued their WAL record for commit but not yet released
    4012                 :              * lock. It is still possible that we see locks held by already
     4013                 :              * the lock. It is still possible that we see locks held by already
    4014                 :              */
    4015             115 :             if (!TransactionIdIsValid(xid))
    4016               1 :                 continue;
    4017                 : 
    4018             114 :             accessExclusiveLocks[index].xid = xid;
    4019             114 :             accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
    4020             114 :             accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
    4021                 : 
    4022             114 :             index++;
    4023                 :         }
    4024                 :     }
    4025                 : 
    4026            1749 :     Assert(index <= els);
    4027                 : 
    4028                 :     /*
    4029                 :      * And release locks.  We do this in reverse order for two reasons: (1)
    4030                 :      * Anyone else who needs more than one of the locks will be trying to lock
    4031                 :      * them in increasing order; we don't want to release the other process
    4032                 :      * until it can get all the locks it needs. (2) This avoids O(N^2)
    4033                 :      * behavior inside LWLockRelease.
    4034                 :      */
    4035           29733 :     for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
    4036           27984 :         LWLockRelease(LockHashPartitionLockByIndex(i));
    4037                 : 
    4038            1749 :     *nlocks = index;
    4039            1749 :     return accessExclusiveLocks;
    4040                 : }
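                          :
                          : /*
                          :  * Usage sketch (illustrative; log_aels() is a hypothetical helper).  The
                          :  * caller mentioned above, LogStandbySnapshot(), receives the palloc'd
                          :  * array plus a count and simply iterates it; each entry identifies one
                          :  * granted AccessExclusiveLock.
                          :  */
                          : #ifdef NOT_USED
                          : static void
                          : log_aels(void)
                          : {
                          :     xl_standby_lock *locks;
                          :     int         nlocks;
                          :     int         i;
                          :
                          :     locks = GetRunningTransactionLocks(&nlocks);
                          :
                          :     for (i = 0; i < nlocks; i++)
                          :         elog(LOG, "xid %u holds AccessExclusiveLock on relation %u of database %u",
                          :              locks[i].xid, locks[i].relOid, locks[i].dbOid);
                          :
                          :     pfree(locks);
                          : }
                          : #endif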
    4041                 : 
    4042                 : /* Provide the textual name of any lock mode */
    4043                 : const char *
    4044            8169 : GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
    4045                 : {
    4046            8169 :     Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
    4047            8169 :     Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
    4048            8169 :     return LockMethods[lockmethodid]->lockModeNames[mode];
    4049                 : }
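                          :
                          : /*
                          :  * Example (illustrative): with the default lock method,
                          :  *
                          :  *      GetLockmodeName(DEFAULT_LOCKMETHOD, AccessExclusiveLock)
                          :  *
                          :  * returns the string "AccessExclusiveLock".  Error-reporting and deadlock
                          :  * code typically passes locktag_lockmethodid from the LOCKTAG in question
                          :  * as the first argument.
                          :  */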
    4050                 : 
    4051                 : #ifdef LOCK_DEBUG
    4052                 : /*
    4053                 :  * Dump all locks in the given proc's myProcLocks lists.
    4054                 :  *
    4055                 :  * Caller is responsible for having acquired appropriate LWLocks.
    4056                 :  */
    4057                 : void
    4058                 : DumpLocks(PGPROC *proc)
    4059                 : {
    4060                 :     int         i;
    4061                 : 
    4062                 :     if (proc == NULL)
    4063                 :         return;
    4064                 : 
    4065                 :     if (proc->waitLock)
    4066                 :         LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
    4067                 : 
    4068                 :     for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
    4069                 :     {
    4070                 :         dlist_head *procLocks = &proc->myProcLocks[i];
    4071                 :         dlist_iter  iter;
    4072                 : 
    4073                 :         dlist_foreach(iter, procLocks)
    4074                 :         {
    4075                 :             PROCLOCK   *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
    4076                 :             LOCK       *lock = proclock->tag.myLock;
    4077                 : 
    4078                 :             Assert(proclock->tag.myProc == proc);
    4079                 :             PROCLOCK_PRINT("DumpLocks", proclock);
    4080                 :             LOCK_PRINT("DumpLocks", lock, 0);
    4081                 :         }
    4082                 :     }
    4083 ECB             : }
    4084                 : 
    4085                 : /*
    4086                 :  * Dump all lmgr locks.
    4087                 :  *
    4088                 :  * Caller is responsible for having acquired appropriate LWLocks.
    4089                 :  */
    4090                 : void
    4091                 : DumpAllLocks(void)
    4092                 : {
    4093                 :     PGPROC     *proc;
    4094                 :     PROCLOCK   *proclock;
    4095                 :     LOCK       *lock;
    4096                 :     HASH_SEQ_STATUS status;
    4097                 : 
    4098                 :     proc = MyProc;
    4099                 : 
    4100                 :     if (proc && proc->waitLock)
    4101                 :         LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
    4102                 : 
    4103                 :     hash_seq_init(&status, LockMethodProcLockHash);
    4104                 : 
    4105                 :     while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
    4106                 :     {
    4107 EUB             :         PROCLOCK_PRINT("DumpAllLocks", proclock);
    4108 ECB             : 
    4109                 :         lock = proclock->tag.myLock;
    4110                 :         if (lock)
    4111                 :             LOCK_PRINT("DumpAllLocks", lock, 0);
    4112                 :         else
    4113                 :             elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
    4114                 :     }
    4115                 : }
    4116                 : #endif                          /* LOCK_DEBUG */
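                          :
                          : /*
                          :  * Debugging note (illustrative): the two dump routines above are compiled
                          :  * only when LOCK_DEBUG is defined (conventionally enabled via
                          :  * pg_config_manual.h or -DLOCK_DEBUG in CPPFLAGS).  They are intended to
                          :  * be called from a debugger or ad-hoc code while the appropriate LWLocks
                          :  * are held, e.g.
                          :  *
                          :  *      DumpLocks(MyProc);      dump the current backend's PROCLOCK lists
                          :  *      DumpAllLocks();         dump every entry in LockMethodProcLockHash
                          :  */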
    4117                 : 
    4118                 : /*
    4119                 :  * LOCK 2PC resource manager's routines
    4120                 :  */
    4121                 : 
    4122                 : /*
    4123                 :  * Re-acquire a lock belonging to a transaction that was prepared.
    4124                 :  *
    4125                 :  * Because this function is run at db startup, re-acquiring the locks should
    4126 EUB             :  * never conflict with running transactions because there are none.  We
    4127                 :  * assume that the lock state represented by the stored 2PC files is legal.
    4128                 :  *
    4129                 :  * When switching from Hot Standby mode to normal operation, the locks will
    4130                 :  * be already held by the startup process. The locks are acquired for the new
    4131                 :  * procs without checking for conflicts, so we don't get a conflict between the
    4132                 :  * startup process and the dummy procs, even though we will momentarily have
    4133                 :  * a situation where two procs are holding the same AccessExclusiveLock,
     4134                 :  * which isn't normally possible because of the conflict. If we're in standby
    4135                 :  * mode, but a recovery snapshot hasn't been established yet, it's possible
    4136 ECB             :  * that some but not all of the locks are already held by the startup process.
    4137                 :  *
    4138                 :  * This approach is simple, but also a bit dangerous, because if there isn't
    4139                 :  * enough shared memory to acquire the locks, an error will be thrown, which
     4140                 :  * is promoted to FATAL and recovery will abort, bringing down the postmaster.
    4141                 :  * A safer approach would be to transfer the locks like we do in
    4142                 :  * AtPrepare_Locks, but then again, in hot standby mode it's possible for
    4143                 :  * read-only backends to use up all the shared lock memory anyway, so that
    4144                 :  * replaying the WAL record that needs to acquire a lock will throw an error
    4145                 :  * and PANIC anyway.
    4146                 :  */
    4147                 : void
    4148 GIC          79 : lock_twophase_recover(TransactionId xid, uint16 info,
    4149                 :                       void *recdata, uint32 len)
    4150                 : {
    4151 CBC          79 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4152              79 :     PGPROC     *proc = TwoPhaseGetDummyProc(xid, false);
    4153 ECB             :     LOCKTAG    *locktag;
    4154                 :     LOCKMODE    lockmode;
    4155                 :     LOCKMETHODID lockmethodid;
    4156                 :     LOCK       *lock;
    4157                 :     PROCLOCK   *proclock;
    4158                 :     PROCLOCKTAG proclocktag;
    4159                 :     bool        found;
    4160                 :     uint32      hashcode;
    4161                 :     uint32      proclock_hashcode;
    4162                 :     int         partition;
    4163                 :     LWLock     *partitionLock;
    4164                 :     LockMethod  lockMethodTable;
    4165                 : 
    4166 GIC          79 :     Assert(len == sizeof(TwoPhaseLockRecord));
    4167 CBC          79 :     locktag = &rec->locktag;
    4168 GIC          79 :     lockmode = rec->lockmode;
    4169              79 :     lockmethodid = locktag->locktag_lockmethodid;
    4170                 : 
    4171              79 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4172 LBC           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4173 GIC          79 :     lockMethodTable = LockMethods[lockmethodid];
    4174                 : 
    4175 GBC          79 :     hashcode = LockTagHashCode(locktag);
    4176 GIC          79 :     partition = LockHashPartition(hashcode);
    4177              79 :     partitionLock = LockHashPartitionLock(hashcode);
    4178                 : 
    4179              79 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4180                 : 
    4181                 :     /*
    4182                 :      * Find or create a lock with this tag.
    4183 EUB             :      */
    4184 GBC          79 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    4185                 :                                                 locktag,
    4186                 :                                                 hashcode,
    4187                 :                                                 HASH_ENTER_NULL,
    4188                 :                                                 &found);
    4189              79 :     if (!lock)
    4190                 :     {
    4191 UBC           0 :         LWLockRelease(partitionLock);
    4192               0 :         ereport(ERROR,
    4193                 :                 (errcode(ERRCODE_OUT_OF_MEMORY),
    4194                 :                  errmsg("out of shared memory"),
    4195                 :                  errhint("You might need to increase max_locks_per_transaction.")));
    4196                 :     }
    4197                 : 
    4198                 :     /*
    4199                 :      * if it's a new lock object, initialize it
    4200                 :      */
    4201 CBC          79 :     if (!found)
    4202                 :     {
    4203              71 :         lock->grantMask = 0;
    4204              71 :         lock->waitMask = 0;
    4205 GNC          71 :         dlist_init(&lock->procLocks);
    4206              71 :         dclist_init(&lock->waitProcs);
    4207 GIC          71 :         lock->nRequested = 0;
    4208 CBC          71 :         lock->nGranted = 0;
    4209             426 :         MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
    4210 GIC          71 :         MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
    4211                 :         LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
    4212                 :     }
    4213                 :     else
    4214                 :     {
    4215                 :         LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
    4216 CBC           8 :         Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
    4217 GIC           8 :         Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
    4218               8 :         Assert(lock->nGranted <= lock->nRequested);
    4219                 :     }
    4220                 : 
    4221                 :     /*
    4222                 :      * Create the hash key for the proclock table.
    4223 ECB             :      */
    4224 CBC          79 :     proclocktag.myLock = lock;
    4225              79 :     proclocktag.myProc = proc;
    4226                 : 
    4227 GIC          79 :     proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
    4228                 : 
    4229                 :     /*
    4230 ECB             :      * Find or create a proclock entry with this tag
    4231 EUB             :      */
    4232 GIC          79 :     proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
    4233                 :                                                         &proclocktag,
    4234                 :                                                         proclock_hashcode,
    4235                 :                                                         HASH_ENTER_NULL,
    4236                 :                                                         &found);
    4237              79 :     if (!proclock)
    4238                 :     {
    4239                 :         /* Oops, not enough shmem for the proclock */
    4240 UIC           0 :         if (lock->nRequested == 0)
    4241 ECB             :         {
    4242                 :             /*
    4243                 :              * There are no other requestors of this lock, so garbage-collect
    4244                 :              * the lock object.  We *must* do this to avoid a permanent leak
    4245                 :              * of shared memory, because there won't be anything to cause
    4246                 :              * anyone to release the lock object later.
    4247                 :              */
    4248 UNC           0 :             Assert(dlist_is_empty(&lock->procLocks));
    4249 LBC           0 :             if (!hash_search_with_hash_value(LockMethodLockHash,
    4250 UNC           0 :                                              &(lock->tag),
    4251 ECB             :                                              hashcode,
    4252                 :                                              HASH_REMOVE,
    4253                 :                                              NULL))
    4254 UIC           0 :                 elog(PANIC, "lock table corrupted");
    4255                 :         }
    4256 LBC           0 :         LWLockRelease(partitionLock);
    4257               0 :         ereport(ERROR,
    4258                 :                 (errcode(ERRCODE_OUT_OF_MEMORY),
    4259                 :                  errmsg("out of shared memory"),
    4260                 :                  errhint("You might need to increase max_locks_per_transaction.")));
    4261                 :     }
    4262                 : 
    4263                 :     /*
    4264 EUB             :      * If new, initialize the new entry
    4265                 :      */
    4266 GIC          79 :     if (!found)
    4267 EUB             :     {
    4268 GIC          75 :         Assert(proc->lockGroupLeader == NULL);
    4269              75 :         proclock->groupLeader = proc;
    4270              75 :         proclock->holdMask = 0;
    4271              75 :         proclock->releaseMask = 0;
    4272 EUB             :         /* Add proclock to appropriate lists */
    4273 GNC          75 :         dlist_push_tail(&lock->procLocks, &proclock->lockLink);
    4274              75 :         dlist_push_tail(&proc->myProcLocks[partition],
    4275                 :                         &proclock->procLink);
    4276                 :         PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
    4277 EUB             :     }
    4278                 :     else
    4279                 :     {
    4280                 :         PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
    4281 GBC           4 :         Assert((proclock->holdMask & ~lock->grantMask) == 0);
    4282                 :     }
    4283 EUB             : 
    4284                 :     /*
    4285                 :      * lock->nRequested and lock->requested[] count the total number of
    4286                 :      * requests, whether granted or waiting, so increment those immediately.
    4287                 :      */
    4288 GIC          79 :     lock->nRequested++;
    4289              79 :     lock->requested[lockmode]++;
    4290              79 :     Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    4291                 : 
    4292                 :     /*
    4293                 :      * We shouldn't already hold the desired lock.
    4294                 :      */
    4295              79 :     if (proclock->holdMask & LOCKBIT_ON(lockmode))
    4296 LBC           0 :         elog(ERROR, "lock %s on object %u/%u/%u is already held",
    4297                 :              lockMethodTable->lockModeNames[lockmode],
    4298                 :              lock->tag.locktag_field1, lock->tag.locktag_field2,
    4299 ECB             :              lock->tag.locktag_field3);
    4300                 : 
    4301                 :     /*
    4302                 :      * We ignore any possible conflicts and just grant ourselves the lock. Not
    4303                 :      * only because we don't bother, but also to avoid deadlocks when
    4304                 :      * switching from standby to normal mode. See function comment.
    4305                 :      */
    4306 CBC          79 :     GrantLock(lock, proclock, lockmode);
    4307 ECB             : 
    4308                 :     /*
    4309                 :      * Bump strong lock count, to make sure any fast-path lock requests won't
    4310 EUB             :      * be granted without consulting the primary lock table.
    4311 ECB             :      */
    4312 GIC          79 :     if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
    4313 ECB             :     {
    4314 CBC          13 :         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
    4315                 : 
    4316 GIC          13 :         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    4317              13 :         FastPathStrongRelationLocks->count[fasthashcode]++;
    4318              13 :         SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    4319                 :     }
    4320                 : 
    4321              79 :     LWLockRelease(partitionLock);
    4322 CBC          79 : }
    4323                 : 
    4324                 : /*
    4325 ECB             :  * Re-acquire a lock belonging to a transaction that was prepared, when
    4326                 :  * starting up into hot standby mode.
    4327                 :  */
    4328                 : void
    4329 UIC           0 : lock_twophase_standby_recover(TransactionId xid, uint16 info,
    4330                 :                               void *recdata, uint32 len)
    4331                 : {
    4332               0 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4333                 :     LOCKTAG    *locktag;
    4334                 :     LOCKMODE    lockmode;
    4335                 :     LOCKMETHODID lockmethodid;
    4336                 : 
    4337               0 :     Assert(len == sizeof(TwoPhaseLockRecord));
    4338               0 :     locktag = &rec->locktag;
    4339               0 :     lockmode = rec->lockmode;
    4340               0 :     lockmethodid = locktag->locktag_lockmethodid;
    4341                 : 
    4342               0 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4343               0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4344                 : 
    4345               0 :     if (lockmode == AccessExclusiveLock &&
    4346 LBC           0 :         locktag->locktag_type == LOCKTAG_RELATION)
    4347                 :     {
    4348               0 :         StandbyAcquireAccessExclusiveLock(xid,
    4349                 :                                           locktag->locktag_field1 /* dboid */ ,
    4350 ECB             :                                           locktag->locktag_field2 /* reloid */ );
    4351                 :     }
    4352 LBC           0 : }
    4353 ECB             : 
    4354                 : 
    4355                 : /*
    4356                 :  * 2PC processing routine for COMMIT PREPARED case.
    4357                 :  *
    4358                 :  * Find and release the lock indicated by the 2PC record.
    4359                 :  */
    4360                 : void
    4361 GIC         850 : lock_twophase_postcommit(TransactionId xid, uint16 info,
    4362                 :                          void *recdata, uint32 len)
    4363                 : {
    4364             850 :     TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    4365             850 :     PGPROC     *proc = TwoPhaseGetDummyProc(xid, true);
    4366                 :     LOCKTAG    *locktag;
    4367                 :     LOCKMETHODID lockmethodid;
    4368                 :     LockMethod  lockMethodTable;
    4369 ECB             : 
    4370 GIC         850 :     Assert(len == sizeof(TwoPhaseLockRecord));
    4371             850 :     locktag = &rec->locktag;
    4372             850 :     lockmethodid = locktag->locktag_lockmethodid;
    4373                 : 
    4374 CBC         850 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4375 UIC           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4376 GIC         850 :     lockMethodTable = LockMethods[lockmethodid];
    4377                 : 
    4378             850 :     LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
    4379 CBC         850 : }
    4380                 : 
    4381 ECB             : /*
    4382                 :  * 2PC processing routine for ROLLBACK PREPARED case.
    4383                 :  *
    4384                 :  * This is actually just the same as the COMMIT case.
    4385                 :  */
    4386                 : void
    4387 GIC         121 : lock_twophase_postabort(TransactionId xid, uint16 info,
    4388                 :                         void *recdata, uint32 len)
    4389                 : {
    4390             121 :     lock_twophase_postcommit(xid, info, recdata, len);
    4391             121 : }
    4392 ECB             : 
    4393                 : /*
    4394                 :  *      VirtualXactLockTableInsert
    4395                 :  *
    4396                 :  *      Take vxid lock via the fast-path.  There can't be any pre-existing
    4397                 :  *      lockers, as we haven't advertised this vxid via the ProcArray yet.
    4398                 :  *
    4399                 :  *      Since MyProc->fpLocalTransactionId will normally contain the same data
    4400                 :  *      as MyProc->lxid, you might wonder if we really need both.  The
    4401                 :  *      difference is that MyProc->lxid is set and cleared unlocked, and
    4402                 :  *      examined by procarray.c, while fpLocalTransactionId is protected by
    4403                 :  *      fpInfoLock and is used only by the locking subsystem.  Doing it this
    4404                 :  *      way makes it easier to verify that there are no funny race conditions.
    4405                 :  *
    4406                 :  *      We don't bother recording this lock in the local lock table, since it's
    4407                 :  *      only ever released at the end of a transaction.  Instead,
    4408                 :  *      LockReleaseAll() calls VirtualXactLockTableCleanup().
    4409                 :  */
    4410                 : void
    4411 GIC      486242 : VirtualXactLockTableInsert(VirtualTransactionId vxid)
    4412                 : {
    4413          486242 :     Assert(VirtualTransactionIdIsValid(vxid));
    4414                 : 
    4415          486242 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    4416                 : 
    4417          486242 :     Assert(MyProc->backendId == vxid.backendId);
    4418 CBC      486242 :     Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
    4419 GIC      486242 :     Assert(MyProc->fpVXIDLock == false);
    4420                 : 
    4421 CBC      486242 :     MyProc->fpVXIDLock = true;
    4422 GIC      486242 :     MyProc->fpLocalTransactionId = vxid.localTransactionId;
    4423                 : 
    4424 CBC      486242 :     LWLockRelease(&MyProc->fpInfoLock);
    4425          486242 : }
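                          :
                          : /*
                          :  * Lifecycle sketch (illustrative, not an excerpt of the actual call
                          :  * sites): at transaction start a backend builds a VirtualTransactionId
                          :  * from MyBackendId and a fresh local transaction id and calls
                          :  * VirtualXactLockTableInsert(vxid); at transaction end, LockReleaseAll()
                          :  * invokes VirtualXactLockTableCleanup() below.  In between, a waiter can
                          :  * call VirtualXactLock(vxid, true); if the lock is still held only via
                          :  * the fast path, that call transfers it into the main lock table (see
                          :  * the SetupLockInTable() use further down) before sleeping on it.
                          :  */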
    4426                 : 
    4427                 : /*
    4428                 :  *      VirtualXactLockTableCleanup
    4429                 :  *
    4430                 :  *      Check whether a VXID lock has been materialized; if so, release it,
    4431                 :  *      unblocking waiters.
    4432                 :  */
    4433 ECB             : void
    4434 GIC      486557 : VirtualXactLockTableCleanup(void)
    4435 EUB             : {
    4436                 :     bool        fastpath;
    4437                 :     LocalTransactionId lxid;
    4438                 : 
    4439 GIC      486557 :     Assert(MyProc->backendId != InvalidBackendId);
    4440 ECB             : 
    4441                 :     /*
    4442                 :      * Clean up shared memory state.
    4443                 :      */
    4444 CBC      486557 :     LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
    4445 ECB             : 
    4446 GIC      486557 :     fastpath = MyProc->fpVXIDLock;
    4447          486557 :     lxid = MyProc->fpLocalTransactionId;
    4448          486557 :     MyProc->fpVXIDLock = false;
    4449 CBC      486557 :     MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
    4450 ECB             : 
    4451 CBC      486557 :     LWLockRelease(&MyProc->fpInfoLock);
    4452 EUB             : 
    4453 ECB             :     /*
    4454                 :      * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
    4455                 :      * that means someone transferred the lock to the main lock table.
    4456                 :      */
    4457 GIC      486557 :     if (!fastpath && LocalTransactionIdIsValid(lxid))
    4458                 :     {
    4459                 :         VirtualTransactionId vxid;
    4460                 :         LOCKTAG     locktag;
    4461                 : 
    4462             258 :         vxid.backendId = MyBackendId;
    4463             258 :         vxid.localTransactionId = lxid;
    4464             258 :         SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
    4465                 : 
    4466             258 :         LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
    4467                 :                              &locktag, ExclusiveLock, false);
    4468                 :     }
    4469 CBC      486557 : }
    4470                 : 
    4471                 : /*
    4472                 :  *      XactLockForVirtualXact
    4473 ECB             :  *
    4474                 :  * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
    4475                 :  * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid).  Unlike those
    4476                 :  * functions, it assumes "xid" is never a subtransaction and that "xid" is
    4477                 :  * prepared, committed, or aborted.
    4478                 :  *
    4479                 :  * If !TransactionIdIsValid(xid), this locks every prepared XID having been
    4480                 :  * known as "vxid" before its PREPARE TRANSACTION.
    4481                 :  */
    4482                 : static bool
    4483 GIC         305 : XactLockForVirtualXact(VirtualTransactionId vxid,
    4484                 :                        TransactionId xid, bool wait)
    4485                 : {
    4486             305 :     bool        more = false;
    4487                 : 
    4488                 :     /* There is no point to wait for 2PCs if you have no 2PCs. */
    4489             305 :     if (max_prepared_xacts == 0)
    4490             101 :         return true;
    4491 ECB             : 
    4492                 :     do
    4493                 :     {
    4494                 :         LockAcquireResult lar;
    4495                 :         LOCKTAG     tag;
    4496                 : 
    4497                 :         /* Clear state from previous iterations. */
    4498 GIC         204 :         if (more)
    4499                 :         {
    4500 LBC           0 :             xid = InvalidTransactionId;
    4501 UIC           0 :             more = false;
    4502 ECB             :         }
    4503                 : 
    4504                 :         /* If we have no xid, try to find one. */
    4505 GIC         204 :         if (!TransactionIdIsValid(xid))
    4506 CBC          98 :             xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
    4507             204 :         if (!TransactionIdIsValid(xid))
    4508                 :         {
    4509 GIC          89 :             Assert(!more);
    4510              89 :             return true;
    4511                 :         }
    4512                 : 
    4513                 :         /* Check or wait for XID completion. */
    4514 CBC         115 :         SET_LOCKTAG_TRANSACTION(tag, xid);
    4515 GIC         115 :         lar = LockAcquire(&tag, ShareLock, false, !wait);
    4516 CBC         115 :         if (lar == LOCKACQUIRE_NOT_AVAIL)
    4517 LBC           0 :             return false;
    4518 GIC         115 :         LockRelease(&tag, ShareLock, false);
    4519             115 :     } while (more);
    4520                 : 
    4521             115 :     return true;
    4522                 : }
    4523                 : 
    4524                 : /*
    4525 ECB             :  *      VirtualXactLock
    4526                 :  *
    4527                 :  * If wait = true, wait as long as the given VXID or any XID acquired by the
    4528                 :  * same transaction is still running.  Then, return true.
    4529                 :  *
    4530                 :  * If wait = false, just check whether that VXID or one of those XIDs is still
    4531                 :  * running, and return true or false.
    4532                 :  */
    4533                 : bool
    4534 CBC         345 : VirtualXactLock(VirtualTransactionId vxid, bool wait)
    4535                 : {
    4536 ECB             :     LOCKTAG     tag;
    4537                 :     PGPROC     *proc;
    4538 CBC         345 :     TransactionId xid = InvalidTransactionId;
    4539                 : 
    4540 GBC         345 :     Assert(VirtualTransactionIdIsValid(vxid));
    4541 EUB             : 
    4542 GBC         345 :     if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
    4543                 :         /* no vxid lock; localTransactionId is a normal, locked XID */
    4544 GIC           1 :         return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
    4545                 : 
    4546             344 :     SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
    4547 ECB             : 
    4548                 :     /*
    4549                 :      * If a lock table entry must be made, this is the PGPROC on whose behalf
    4550                 :      * it must be done.  Note that the transaction might end or the PGPROC
    4551                 :      * might be reassigned to a new backend before we get around to examining
    4552                 :      * it, but it doesn't matter.  If we find upon examination that the
    4553                 :      * relevant lxid is no longer running here, that's enough to prove that
    4554                 :      * it's no longer running anywhere.
    4555                 :      */
    4556 GIC         344 :     proc = BackendIdGetProc(vxid.backendId);
    4557             344 :     if (proc == NULL)
    4558               3 :         return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
    4559                 : 
    4560                 :     /*
    4561                 :      * We must acquire this lock before checking the backendId and lxid
    4562 ECB             :      * against the ones we're waiting for.  The target backend will only set
    4563                 :      * or clear lxid while holding this lock.
    4564                 :      */
    4565 CBC         341 :     LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
    4566                 : 
    4567 GIC         341 :     if (proc->backendId != vxid.backendId
    4568 CBC         341 :         || proc->fpLocalTransactionId != vxid.localTransactionId)
    4569                 :     {
    4570 ECB             :         /* VXID ended */
    4571 CBC          62 :         LWLockRelease(&proc->fpInfoLock);
    4572 GIC          62 :         return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
    4573                 :     }
    4574                 : 
    4575                 :     /*
    4576                 :      * If we aren't asked to wait, there's no need to set up a lock table
    4577                 :      * entry.  The transaction is still in progress, so just return false.
    4578                 :      */
    4579             279 :     if (!wait)
    4580 ECB             :     {
    4581 GIC          15 :         LWLockRelease(&proc->fpInfoLock);
    4582 CBC          15 :         return false;
    4583                 :     }
    4584                 : 
    4585                 :     /*
    4586                 :      * OK, we're going to need to sleep on the VXID.  But first, we must set
    4587 ECB             :      * up the primary lock table entry, if needed (ie, convert the proc's
    4588                 :      * fast-path lock on its VXID to a regular lock).
    4589                 :      */
    4590 GBC         264 :     if (proc->fpVXIDLock)
    4591                 :     {
    4592 ECB             :         PROCLOCK   *proclock;
    4593                 :         uint32      hashcode;
    4594                 :         LWLock     *partitionLock;
    4595                 : 
    4596 CBC         258 :         hashcode = LockTagHashCode(&tag);
    4597                 : 
    4598 GIC         258 :         partitionLock = LockHashPartitionLock(hashcode);
    4599             258 :         LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4600                 : 
    4601 CBC         258 :         proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
    4602                 :                                     &tag, hashcode, ExclusiveLock);
    4603             258 :         if (!proclock)
    4604 ECB             :         {
    4605 UIC           0 :             LWLockRelease(partitionLock);
    4606 LBC           0 :             LWLockRelease(&proc->fpInfoLock);
    4607 UIC           0 :             ereport(ERROR,
    4608 ECB             :                     (errcode(ERRCODE_OUT_OF_MEMORY),
    4609                 :                      errmsg("out of shared memory"),
    4610                 :                      errhint("You might need to increase max_locks_per_transaction.")));
    4611                 :         }
    4612 GIC         258 :         GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
    4613                 : 
    4614             258 :         LWLockRelease(partitionLock);
    4615                 : 
    4616             258 :         proc->fpVXIDLock = false;
    4617                 :     }
    4618                 : 
    4619                 :     /*
    4620                 :      * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
    4621                 :      * search.  The proc might have assigned this XID but not yet locked it,
    4622                 :      * in which case the proc will lock this XID before releasing the VXID.
    4623                 :      * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
    4624                 :      * so we won't save an XID of a different VXID.  It doesn't matter whether
    4625                 :      * we save this before or after setting up the primary lock table entry.
    4626                 :      */
    4627             264 :     xid = proc->xid;
    4628                 : 
    4629                 :     /* Done with proc->fpLockBits */
    4630             264 :     LWLockRelease(&proc->fpInfoLock);
    4631                 : 
    4632                 :     /* Time to wait. */
    4633             264 :     (void) LockAcquire(&tag, ShareLock, false, false);
    4634                 : 
    4635             239 :     LockRelease(&tag, ShareLock, false);
    4636             239 :     return XactLockForVirtualXact(vxid, xid, wait);
    4637                 : }
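
For context, VirtualXactLock() is typically called in a loop over the conflicting VXIDs returned by GetLockConflicts() (defined earlier in this file), much as lmgr.c's WaitForLockers() does. A minimal sketch, with a hypothetical wrapper name and no error handling:

static void
WaitOutConflictingLockers(const LOCKTAG *locktag, LOCKMODE lockmode)
{
    VirtualTransactionId *vxids;
    int         nvxids;
    int         i;

    /* Snapshot the VXIDs currently holding conflicting locks. */
    vxids = GetLockConflicts(locktag, lockmode, &nvxids);

    /* Sleep on each VXID until its transaction (and any XID it acquired) ends. */
    for (i = 0; i < nvxids; i++)
        (void) VirtualXactLock(vxids[i], true);
}
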
    4638                 : 
    4639                 : /*
    4640                 :  * LockWaiterCount
    4641                 :  *
    4642                 :  * Find the number of lock requesters on this locktag
    4643                 :  */
    4644                 : int
    4645          114052 : LockWaiterCount(const LOCKTAG *locktag)
    4646                 : {
    4647          114052 :     LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    4648                 :     LOCK       *lock;
    4649                 :     bool        found;
    4650                 :     uint32      hashcode;
    4651                 :     LWLock     *partitionLock;
    4652          114052 :     int         waiters = 0;
    4653                 : 
    4654          114052 :     if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
    4655 UIC           0 :         elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    4656                 : 
    4657 GIC      114052 :     hashcode = LockTagHashCode(locktag);
    4658          114052 :     partitionLock = LockHashPartitionLock(hashcode);
    4659          114052 :     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    4660                 : 
    4661          114052 :     lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
    4662                 :                                                 locktag,
    4663                 :                                                 hashcode,
    4664                 :                                                 HASH_FIND,
    4665                 :                                                 &found);
    4666          114052 :     if (found)
    4667                 :     {
    4668              36 :         Assert(lock != NULL);
    4669              36 :         waiters = lock->nRequested;
    4670                 :     }
    4671          114052 :     LWLockRelease(partitionLock);
    4672                 : 
    4673          114052 :     return waiters;
    4674                 : }
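
A typical caller of LockWaiterCount() is the relation-extension code, which checks how many backends have requested the extension lock so it can add several pages at once (cf. RelationExtensionLockWaiterCount() in lmgr.c). A minimal sketch with placeholder OIDs:

static int
ExtensionLockRequesters(Oid dbid, Oid relid)
{
    LOCKTAG     tag;

    /* nRequested counts holders as well as waiters, including the caller. */
    SET_LOCKTAG_RELATION_EXTEND(tag, dbid, relid);
    return LockWaiterCount(&tag);
}
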
        

Generated by: LCOV version v1.16-55-g56c0a2a