LCOV - differential code coverage report
Current view:   top level - src/backend/access/hash - hashpage.c (source / functions)
Current:        Differential Code Coverage HEAD vs 15
Current Date:   2023-04-08 17:13:01
Baseline:       15
Baseline Date:  2023-04-08 15:09:40
Legend:         Lines: hit  not hit

                      Coverage  Total    Hit    LBC    UBC    GIC    GNC    CBC    EUB    DCB
Lines:                  81.8 %    479    392      1     86      1      3    388      1      3
Functions:              94.7 %     19     18             1             3     15
Line coverage date bins:
    [..60] days:       100.0 %      1      1                           1
    (240..) days:       81.8 %    478    391      1     86      1      2    388      1
Function coverage date bins:
    (240..) days:       94.7 %     19     18             1             3     15

 Age         Owner                  TLA  Line data    Source code
                                  1                 : /*-------------------------------------------------------------------------
                                  2                 :  *
                                  3                 :  * hashpage.c
                                  4                 :  *    Hash table page management code for the Postgres hash access method
                                  5                 :  *
                                  6                 :  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
                                  7                 :  * Portions Copyright (c) 1994, Regents of the University of California
                                  8                 :  *
                                  9                 :  *
                                 10                 :  * IDENTIFICATION
                                 11                 :  *    src/backend/access/hash/hashpage.c
                                 12                 :  *
                                 13                 :  * NOTES
                                 14                 :  *    Postgres hash pages look like ordinary relation pages.  The opaque
                                 15                 :  *    data at high addresses includes information about the page including
                                 16                 :  *    whether a page is an overflow page or a true bucket, the bucket
                                 17                 :  *    number, and the block numbers of the preceding and following pages
                                 18                 :  *    in the same bucket.
                                 19                 :  *
                                 20                 :  *    The first page in a hash relation, page zero, is special -- it stores
                                 21                 :  *    information describing the hash table; it is referred to as the
                                 22                 :  *    "meta page." Pages one and higher store the actual data.
                                 23                 :  *
                                 24                 :  *    There are also bitmap pages, which are not manipulated here;
                                 25                 :  *    see hashovfl.c.
                                 26                 :  *
                                 27                 :  *-------------------------------------------------------------------------
                                 28                 :  */
                                 29                 : #include "postgres.h"
                                 30                 : 
                                 31                 : #include "access/hash.h"
                                 32                 : #include "access/hash_xlog.h"
                                 33                 : #include "access/xloginsert.h"
                                 34                 : #include "miscadmin.h"
                                 35                 : #include "port/pg_bitutils.h"
                                 36                 : #include "storage/lmgr.h"
                                 37                 : #include "storage/predicate.h"
                                 38                 : #include "storage/smgr.h"
                                 39                 : 
                                 40                 : static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
                                 41                 :                                 uint32 nblocks);
                                 42                 : static void _hash_splitbucket(Relation rel, Buffer metabuf,
                                 43                 :                               Bucket obucket, Bucket nbucket,
                                 44                 :                               Buffer obuf,
                                 45                 :                               Buffer nbuf,
                                 46                 :                               HTAB *htab,
                                 47                 :                               uint32 maxbucket,
                                 48                 :                               uint32 highmask, uint32 lowmask);
                                 49                 : static void log_split_page(Relation rel, Buffer buf);
                                 50                 : 
                                 51                 : 
                                 52                 : /*
                                 53                 :  *  _hash_getbuf() -- Get a buffer by block number for read or write.
                                 54                 :  *
                                 55                 :  *      'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
                                 56                 :  *      'flags' is a bitwise OR of the allowed page types.
                                 57                 :  *
                                 58                 :  *      This must be used only to fetch pages that are expected to be valid
                                 59                 :  *      already.  _hash_checkpage() is applied using the given flags.
                                 60                 :  *
                                 61                 :  *      When this routine returns, the appropriate lock is set on the
                                 62                 :  *      requested buffer and its reference count has been incremented
                                 63                 :  *      (ie, the buffer is "locked and pinned").
                                 64                 :  *
                                 65                 :  *      P_NEW is disallowed because this routine can only be used
                                 66                 :  *      to access pages that are known to be before the filesystem EOF.
                                 67                 :  *      Extending the index should be done with _hash_getnewbuf.
                                 68                 :  */
                                 69                 : Buffer
 5820 tgl                        70 CBC      921437 : _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
                                 71                 : {
                                 72                 :     Buffer      buf;
                                 73                 : 
 5834                            74          921437 :     if (blkno == P_NEW)
 5834 tgl                        75 UBC           0 :         elog(ERROR, "hash AM does not use P_NEW");
                                 76                 : 
 7157 tgl                        77 CBC      921437 :     buf = ReadBuffer(rel, blkno);
                                 78                 : 
                                 79          921437 :     if (access != HASH_NOLOCK)
                                 80          575921 :         LockBuffer(buf, access);
                                 81                 : 
                                 82                 :     /* ref count and lock type are correct */
                                 83                 : 
 5820                            84          921437 :     _hash_checkpage(rel, buf, flags);
                                 85                 : 
                                 86          921437 :     return buf;
                                 87                 : }
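
A minimal usage sketch of the lock-and-pin contract described above (the block number and page-type flag are illustrative choices; all routines are from this file):

    Buffer      buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE);
    Page        page = BufferGetPage(buf);

    /* ... examine the page while we hold the shared lock and the pin ... */

    _hash_relbuf(rel, buf);     /* releases both the lock and the pin */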
                                 88                 : 
                                 89                 : /*
                                 90                 :  * _hash_getbuf_with_condlock_cleanup() -- Try to get a buffer for cleanup.
                                 91                 :  *
                                 92                 :  *      We read the page and try to acquire a cleanup lock.  If we get it,
                                 93                 :  *      we return the buffer; otherwise, we return InvalidBuffer.
                                 94                 :  */
                                 95                 : Buffer
 2321 rhaas                      96             666 : _hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
                                 97                 : {
                                 98                 :     Buffer      buf;
                                 99                 : 
                                100             666 :     if (blkno == P_NEW)
 2321 rhaas                     101 UBC           0 :         elog(ERROR, "hash AM does not use P_NEW");
                                102                 : 
 2321 rhaas                     103 CBC         666 :     buf = ReadBuffer(rel, blkno);
                                104                 : 
                                105             666 :     if (!ConditionalLockBufferForCleanup(buf))
                                106                 :     {
 2321 rhaas                     107 UBC           0 :         ReleaseBuffer(buf);
                                108               0 :         return InvalidBuffer;
                                109                 :     }
                                110                 : 
                                111                 :     /* ref count and lock type are correct */
                                112                 : 
 2321 rhaas                     113 CBC         666 :     _hash_checkpage(rel, buf, flags);
                                114                 : 
                                115             666 :     return buf;
                                116                 : }
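
A sketch of how a caller copes with the conditional lock not being available; _hash_expandtable() below is the real caller, which simply abandons the split in that case:

    buf = _hash_getbuf_with_condlock_cleanup(rel, blkno, LH_BUCKET_PAGE);
    if (!BufferIsValid(buf))
        return;                 /* someone else holds a pin; give up quietly */

    /* ... we now hold a cleanup lock and a pin on the page ... */
    _hash_relbuf(rel, buf);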
                                117                 : 
                                118                 : /*
                                119                 :  *  _hash_getinitbuf() -- Get and initialize a buffer by block number.
                                120                 :  *
                                121                 :  *      This must be used only to fetch pages that are known to be before
                                122                 :  *      the index's filesystem EOF, but are to be filled from scratch.
                                123                 :  *      _hash_pageinit() is applied automatically.  Otherwise it has
                                124                 :  *      effects similar to _hash_getbuf() with access = HASH_WRITE.
                                125                 :  *
                                126                 :  *      When this routine returns, a write lock is set on the
                                127                 :  *      requested buffer and its reference count has been incremented
                                128                 :  *      (ie, the buffer is "locked and pinned").
                                129                 :  *
                                130                 :  *      P_NEW is disallowed because this routine can only be used
                                131                 :  *      to access pages that are known to be before the filesystem EOF.
                                132                 :  *      Extending the index should be done with _hash_getnewbuf.
                                133                 :  */
                                134                 : Buffer
 5820 tgl                       135              35 : _hash_getinitbuf(Relation rel, BlockNumber blkno)
                                136                 : {
                                137                 :     Buffer      buf;
                                138                 : 
                                139              35 :     if (blkno == P_NEW)
 5820 tgl                       140 UBC           0 :         elog(ERROR, "hash AM does not use P_NEW");
                                141                 : 
 3069 heikki.linnakangas        142 CBC          35 :     buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
                                143                 :                              NULL);
                                144                 : 
                                145                 :     /* ref count and lock type are correct */
                                146                 : 
                                147                 :     /* initialize the page */
 2545 kgrittn                   148              35 :     _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
                                149                 : 
 7157 tgl                       150              35 :     return buf;
                                151                 : }
                                152                 : 
                                153                 : /*
                                154                 :  *  _hash_initbuf() -- Get and initialize a buffer by bucket number.
                                155                 :  */
                                156                 : void
 2224 rhaas                     157            4113 : _hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
                                158                 :               bool initpage)
                                159                 : {
                                160                 :     HashPageOpaque pageopaque;
                                161                 :     Page        page;
                                162                 : 
                                163            4113 :     page = BufferGetPage(buf);
                                164                 : 
                                165                 :     /* initialize the page */
                                166            4113 :     if (initpage)
                                167             275 :         _hash_pageinit(page, BufferGetPageSize(buf));
                                168                 : 
  373 michael                   169            4113 :     pageopaque = HashPageGetOpaque(page);
                                170                 : 
                                171                 :     /*
                                172                 :      * Set hasho_prevblkno with current hashm_maxbucket. This value will be
                                173                 :      * used to validate cached HashMetaPageData. See
                                174                 :      * _hash_getbucketbuf_from_hashkey().
                                175                 :      */
 2224 rhaas                     176            4113 :     pageopaque->hasho_prevblkno = max_bucket;
                                177            4113 :     pageopaque->hasho_nextblkno = InvalidBlockNumber;
                                178            4113 :     pageopaque->hasho_bucket = num_bucket;
                                179            4113 :     pageopaque->hasho_flag = flag;
                                180            4113 :     pageopaque->hasho_page_id = HASHO_PAGE_ID;
                                181            4113 : }
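
The hasho_prevblkno overloading noted in the comment above is consumed by _hash_getbucketbuf_from_hashkey(); a simplified sketch of that validation check, where cachedmetap is an illustrative name for the backend-local copy of the metapage:

    pageopaque = HashPageGetOpaque(page);
    if (pageopaque->hasho_prevblkno == InvalidBlockNumber ||
        pageopaque->hasho_prevblkno <= cachedmetap->hashm_maxbucket)
    {
        /* cached HashMetaPageData is still valid for this bucket */
    }
    else
    {
        /* the table has grown since we cached it; refresh and retry */
    }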
                                182                 : 
                                183                 : /*
                                184                 :  *  _hash_getnewbuf() -- Get a new page at the end of the index.
                                185                 :  *
                                186                 :  *      This has the same API as _hash_getinitbuf, except that we are adding
                                187                 :  *      a page to the index, and hence expect the page to be past the
                                188                 :  *      logical EOF.  (However, we have to support the case where it isn't,
                                189                 :  *      since a prior try might have crashed after extending the filesystem
                                190                 :  *      EOF but before updating the metapage to reflect the added page.)
                                191                 :  *
                                192                 :  *      It is caller's responsibility to ensure that only one process can
                                193                 :  *      extend the index at a time.  In practice, this function is called
                                194                 :  *      only while holding write lock on the metapage, because adding a page
                                195                 :  *      is always associated with an update of metapage data.
                                196                 :  */
                                197                 : Buffer
 4484                           198            4928 : _hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
                                199                 : {
                                200            4928 :     BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
                                201                 :     Buffer      buf;
                                202                 : 
 5834 tgl                       203            4928 :     if (blkno == P_NEW)
 5834 tgl                       204 UBC           0 :         elog(ERROR, "hash AM does not use P_NEW");
 5834 tgl                       205 CBC        4928 :     if (blkno > nblocks)
 5834 tgl                       206 UBC           0 :         elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
                                207                 :              RelationGetRelationName(rel));
                                208                 : 
                                209                 :     /* smgr insists we explicitly extend the relation */
 5834 tgl                       210 CBC        4928 :     if (blkno == nblocks)
                                211                 :     {
    4 andres                    212 GNC        4262 :         buf = ExtendBufferedRel(EB_REL(rel), forkNum, NULL,
                                213                 :                                 EB_LOCK_FIRST | EB_SKIP_EXTENSION_LOCK);
 5834 tgl                       214 GIC        4262 :         if (BufferGetBlockNumber(buf) != blkno)
 5834 tgl                       215 LBC           0 :             elog(ERROR, "unexpected hash relation size: %u, should be %u",
 5834 tgl                       216 EUB             :                  BufferGetBlockNumber(buf), blkno);
                                217                 :     }
                                218                 :     else
                                219                 :     {
 3069 heikki.linnakangas        220 CBC         666 :         buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
                                221                 :                                  NULL);
                                222                 :     }
                                223                 : 
                                224                 :     /* ref count and lock type are correct */
                                225                 : 
                                226                 :     /* initialize the page */
 2545 kgrittn                   227            4928 :     _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
                                228                 : 
 5834 tgl                       229            4928 :     return buf;
                                230                 : }
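
Roughly how _hash_expandtable() below drives this routine while holding the metapage write lock (a sketch, not the verbatim call site):

    start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
    buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);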
                                231                 : 
                                232                 : /*
                                233                 :  *  _hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
                                234                 :  *
                                235                 :  *      This is identical to _hash_getbuf() but also allows a buffer access
                                236                 :  *      strategy to be specified.  We use this for VACUUM operations.
                                237                 :  */
                                238                 : Buffer
 5793                           239             469 : _hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
                                240                 :                            int access, int flags,
                                241                 :                            BufferAccessStrategy bstrategy)
                                242                 : {
                                243                 :     Buffer      buf;
                                244                 : 
                                245             469 :     if (blkno == P_NEW)
 5793 tgl                       246 UBC           0 :         elog(ERROR, "hash AM does not use P_NEW");
                                247                 : 
 5273 heikki.linnakangas        248 CBC         469 :     buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
                                249                 : 
 5793 tgl                       250             469 :     if (access != HASH_NOLOCK)
                                251             469 :         LockBuffer(buf, access);
                                252                 : 
                                253                 :     /* ref count and lock type are correct */
                                254                 : 
                                255             469 :     _hash_checkpage(rel, buf, flags);
                                256                 : 
                                257             469 :     return buf;
                                258                 : }
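
A sketch of VACUUM-style use; the BAS_VACUUM strategy object here is an illustrative stand-in for whatever strategy the caller was actually handed:

    BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_VACUUM);
    Buffer      buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
                                                 LH_OVERFLOW_PAGE, bstrategy);

    /* ... remove dead tuples from the page ... */
    _hash_relbuf(rel, buf);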
                                259                 : 
                                260                 : /*
                                261                 :  *  _hash_relbuf() -- release a locked buffer.
                                262                 :  *
                                263                 :  * Lock and pin (refcount) are both dropped.
                                264                 :  */
                                265                 : void
 7157                           266          546556 : _hash_relbuf(Relation rel, Buffer buf)
                                267                 : {
 6218                           268          546556 :     UnlockReleaseBuffer(buf);
 7157                           269          546556 : }
                                270                 : 
                                271                 : /*
                                272                 :  *  _hash_dropbuf() -- release an unlocked buffer.
                                273                 :  *
                                274                 :  * This is used to unpin a buffer on which we hold no lock.
                                275                 :  */
                                276                 : void
                                277          381266 : _hash_dropbuf(Relation rel, Buffer buf)
                                278                 : {
                                279          381266 :     ReleaseBuffer(buf);
                                280          381266 : }
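
The lock/pin distinction matters for scans: a scan gives up the content lock on the primary bucket page but keeps its pin, which holds off a concurrent split or vacuum of that bucket (both need a cleanup lock); _hash_dropscanbuf() below releases exactly such leftover pins. A sketch of that pattern, with an illustrative variable name:

    LockBuffer(bucket_buf, BUFFER_LOCK_UNLOCK);   /* drop the lock, keep the pin */
    /* ... continue the scan over the bucket's overflow pages ... */
    _hash_dropbuf(rel, bucket_buf);               /* finally drop the bare pin */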
                                281                 : 
                                282                 : /*
                                283                 :  *  _hash_dropscanbuf() -- release buffers used in scan.
                                284                 :  *
                                285                 :  * This routine unpins the buffers used during scan on which we
                                286                 :  * hold no lock.
                                287                 :  */
                                288                 : void
 2321 rhaas                     289             694 : _hash_dropscanbuf(Relation rel, HashScanOpaque so)
                                290                 : {
                                291                 :     /* release pin we hold on primary bucket page */
                                292             694 :     if (BufferIsValid(so->hashso_bucket_buf) &&
 2025                           293             292 :         so->hashso_bucket_buf != so->currPos.buf)
 2321                           294              78 :         _hash_dropbuf(rel, so->hashso_bucket_buf);
                                295             694 :     so->hashso_bucket_buf = InvalidBuffer;
                                296                 : 
                                297                 :     /* release pin we hold on primary bucket page  of bucket being split */
                                298             694 :     if (BufferIsValid(so->hashso_split_bucket_buf) &&
 2025 rhaas                     299 UBC           0 :         so->hashso_split_bucket_buf != so->currPos.buf)
 2321                           300               0 :         _hash_dropbuf(rel, so->hashso_split_bucket_buf);
 2321 rhaas                     301 CBC         694 :     so->hashso_split_bucket_buf = InvalidBuffer;
                                302                 : 
                                303                 :     /* release any pin we still hold */
 2025                           304             694 :     if (BufferIsValid(so->currPos.buf))
                                305             214 :         _hash_dropbuf(rel, so->currPos.buf);
                                306             694 :     so->currPos.buf = InvalidBuffer;
                                307                 : 
                                308                 :     /* reset split scan */
 2321                           309             694 :     so->hashso_buc_populated = false;
                                310             694 :     so->hashso_buc_split = false;
                                311             694 : }
                                312                 : 
                                313                 : 
                                314                 : /*
                                315                 :  *  _hash_init() -- Initialize the metadata page of a hash index,
                                316                 :  *              the initial buckets, and the initial bitmap page.
                                317                 :  *
                                318                 :  * The initial number of buckets is dependent on num_tuples, an estimate
                                319                 :  * of the number of tuples to be loaded into the index initially.  The
                                320                 :  * chosen number of buckets is returned.
                                321                 :  *
                                322                 :  * We are fairly cavalier about locking here, since we know that no one else
                                323                 :  * could be accessing this index.  In particular the rule about not holding
                                324                 :  * multiple buffer locks is ignored.
                                325                 :  */
                                326                 : uint32
 2224                           327             149 : _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
                                328                 : {
                                329                 :     Buffer      metabuf;
                                330                 :     Buffer      buf;
                                331                 :     Buffer      bitmapbuf;
                                332                 :     Page        pg;
                                333                 :     HashMetaPage metap;
                                334                 :     RegProcedure procid;
                                335                 :     int32       data_width;
                                336                 :     int32       item_width;
                                337                 :     int32       ffactor;
                                338                 :     uint32      num_buckets;
                                339                 :     uint32      i;
                                340                 :     bool        use_wal;
                                341                 : 
                                342                 :     /* safety check */
 4484                           343             149 :     if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
 7202 tgl                       344 UBC           0 :         elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
                                345                 :              RelationGetRelationName(rel));
                                346                 : 
                                347                 :     /*
                                348                 :      * WAL log creation of pages if the relation is persistent, or this is the
                                349                 :      * init fork.  Init forks for unlogged relations always need to be WAL
                                350                 :      * logged.
                                351                 :      */
 2092 rhaas                     352 CBC         149 :     use_wal = RelationNeedsWAL(rel) || forkNum == INIT_FORKNUM;
                                353                 : 
                                354                 :     /*
                                355                 :      * Determine the target fill factor (in tuples per bucket) for this index.
                                356                 :      * The idea is to make the fill factor correspond to pages about as full
                                357                 :      * as the user-settable fillfactor parameter says.  We can compute it
                                358                 :      * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
                                359                 :      */
 5319 tgl                       360             149 :     data_width = sizeof(uint32);
 6283                           361             149 :     item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
                                362                 :         sizeof(ItemIdData);     /* include the line pointer */
 1231 michael                   363             149 :     ffactor = HashGetTargetPageUsage(rel) / item_width;
                                364                 :     /* keep to a sane range */
 7157 tgl                       365             149 :     if (ffactor < 10)
 7157 tgl                       366 UBC           0 :         ffactor = 10;
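    /*
     * Illustrative arithmetic (not from the source): on a typical 64-bit
     * build with 8 kB blocks and the default hash fillfactor of 75, the
     * target page usage is about 6144 bytes and item_width is
     * MAXALIGN(8) + MAXALIGN(4) + 4 = 20 bytes, so ffactor comes out at
     * roughly 307 tuples per bucket.
     */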
                                367                 : 
 2047 rhaas                     368 CBC         149 :     procid = index_getprocid(rel, 1, HASHSTANDARD_PROC);
                                369                 : 
                                370                 :     /*
                                371                 :      * We initialize the metapage, the first N bucket pages, and the first
                                372                 :      * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
                                373                 :      * calls to occur.  This ensures that the smgr level has the right idea of
                                374                 :      * the physical index length.
                                375                 :      *
                                376                 :      * Critical section not required, because on error the creation of the
                                377                 :      * whole relation will be rolled back.
                                378                 :      */
 2224                           379             149 :     metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
                                380             149 :     _hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
                                381             149 :     MarkBufferDirty(metabuf);
                                382                 : 
                                383             149 :     pg = BufferGetPage(metabuf);
                                384             149 :     metap = HashPageGetMeta(pg);
                                385                 : 
                                386                 :     /* XLOG stuff */
 2092                           387             149 :     if (use_wal)
                                388                 :     {
                                389                 :         xl_hash_init_meta_page xlrec;
                                390                 :         XLogRecPtr  recptr;
                                391                 : 
 2217                           392              89 :         xlrec.num_tuples = num_tuples;
                                393              89 :         xlrec.procid = metap->hashm_procid;
                                394              89 :         xlrec.ffactor = metap->hashm_ffactor;
                                395                 : 
                                396              89 :         XLogBeginInsert();
                                397              89 :         XLogRegisterData((char *) &xlrec, SizeOfHashInitMetaPage);
 1983 tgl                       398              89 :         XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
                                399                 : 
 2217 rhaas                     400              89 :         recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE);
                                401                 : 
                                402              89 :         PageSetLSN(BufferGetPage(metabuf), recptr);
                                403                 :     }
                                404                 : 
 2224                           405             149 :     num_buckets = metap->hashm_maxbucket + 1;
                                406                 : 
                                407                 :     /*
                                408                 :      * Release buffer lock on the metapage while we initialize buckets.
                                409                 :      * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
                                410                 :      * won't accomplish anything.  It's a bad idea to hold buffer locks for
                                411                 :      * long intervals in any case, since that can block the bgwriter.
                                412                 :      */
                                413             149 :     LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
                                414                 : 
                                415                 :     /*
                                416                 :      * Initialize and WAL Log the first N buckets
                                417                 :      */
                                418            3987 :     for (i = 0; i < num_buckets; i++)
                                419                 :     {
                                420                 :         BlockNumber blkno;
                                421                 : 
                                422                 :         /* Allow interrupts, in case N is huge */
                                423            3838 :         CHECK_FOR_INTERRUPTS();
                                424                 : 
                                425            3838 :         blkno = BUCKET_TO_BLKNO(metap, i);
                                426            3838 :         buf = _hash_getnewbuf(rel, blkno, forkNum);
                                427            3838 :         _hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
                                428            3838 :         MarkBufferDirty(buf);
                                429                 : 
 2092                           430            3838 :         if (use_wal)
  277 rhaas                     431 GNC        2592 :             log_newpage(&rel->rd_locator,
                                432                 :                         forkNum,
                                433                 :                         blkno,
                                434                 :                         BufferGetPage(buf),
                                435                 :                         true);
 2224 rhaas                     436 CBC        3838 :         _hash_relbuf(rel, buf);
                                437                 :     }
                                438                 : 
                                439                 :     /* Now reacquire buffer lock on metapage */
                                440             149 :     LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
                                441                 : 
                                442                 :     /*
                                443                 :      * Initialize bitmap page
                                444                 :      */
                                445             149 :     bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
                                446             149 :     _hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
                                447             149 :     MarkBufferDirty(bitmapbuf);
                                448                 : 
                                449                 :     /* add the new bitmap page to the metapage's list of bitmaps */
                                450                 :     /* metapage already has a write lock */
                                451             149 :     if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
 2224 rhaas                     452 UBC           0 :         ereport(ERROR,
                                453                 :                 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                454                 :                  errmsg("out of overflow pages in hash index \"%s\"",
                                455                 :                         RelationGetRelationName(rel))));
                                456                 : 
 2224 rhaas                     457 CBC         149 :     metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
                                458                 : 
                                459             149 :     metap->hashm_nmaps++;
                                460             149 :     MarkBufferDirty(metabuf);
                                461                 : 
                                462                 :     /* XLOG stuff */
 2092                           463             149 :     if (use_wal)
                                464                 :     {
                                465                 :         xl_hash_init_bitmap_page xlrec;
                                466                 :         XLogRecPtr  recptr;
                                467                 : 
 2217                           468              89 :         xlrec.bmsize = metap->hashm_bmsize;
                                469                 : 
                                470              89 :         XLogBeginInsert();
                                471              89 :         XLogRegisterData((char *) &xlrec, SizeOfHashInitBitmapPage);
                                472              89 :         XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT);
                                473                 : 
                                474                 :         /*
                                475                 :          * This is safe only because nobody else can be modifying the index at
                                476                 :          * this stage; it's only visible to the transaction that is creating
                                477                 :          * it.
                                478                 :          */
                                479              89 :         XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
                                480                 : 
                                481              89 :         recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE);
                                482                 : 
                                483              89 :         PageSetLSN(BufferGetPage(bitmapbuf), recptr);
                                484              89 :         PageSetLSN(BufferGetPage(metabuf), recptr);
                                485                 :     }
                                486                 : 
                                487                 :     /* all done */
 2224                           488             149 :     _hash_relbuf(rel, bitmapbuf);
                                489             149 :     _hash_relbuf(rel, metabuf);
                                490                 : 
                                491             149 :     return num_buckets;
                                492                 : }
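
For context, the index build code drives this routine roughly as follows (a sketch of the call sites in hashbuild() and hashbuildempty(), not verbatim):

    /* CREATE INDEX: size the table from the estimated number of heap tuples */
    num_buckets = _hash_init(index, reltuples, MAIN_FORKNUM);

    /* unlogged index: set up an empty, always-WAL-logged init fork */
    _hash_init(index, 0, INIT_FORKNUM);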
                                493                 : 
                                494                 : /*
                                495                 :  *  _hash_init_metabuffer() -- Initialize the metadata page of a hash index.
                                496                 :  */
                                497                 : void
                                498             170 : _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
                                499                 :                       uint16 ffactor, bool initpage)
                                500                 : {
                                501                 :     HashMetaPage metap;
                                502                 :     HashPageOpaque pageopaque;
                                503                 :     Page        page;
                                504                 :     double      dnumbuckets;
                                505                 :     uint32      num_buckets;
                                506                 :     uint32      spare_index;
                                507                 :     uint32      lshift;
                                508                 : 
                                509                 :     /*
                                510                 :      * Choose the number of initial bucket pages to match the fill factor
                                511                 :      * given the estimated number of tuples.  We round up the result to the
                                512                 :      * total number of buckets which has to be allocated before using its
                                513                 :      * hashm_spares element. However always force at least 2 bucket pages. The
                                514                 :      * upper limit is determined by considerations explained in
                                515                 :      * _hash_expandtable().
                                516                 :      */
 5503 tgl                       517             170 :     dnumbuckets = num_tuples / ffactor;
                                518             170 :     if (dnumbuckets <= 2.0)
                                519              55 :         num_buckets = 2;
                                520             115 :     else if (dnumbuckets >= (double) 0x40000000)
 5503 tgl                       521 UBC           0 :         num_buckets = 0x40000000;
                                522                 :     else
 2197 rhaas                     523 CBC         115 :         num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));
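    /*
     * Illustrative example (not from the source): a request for roughly 6
     * buckets falls in splitpoint 3, so it is rounded up to the 8 buckets
     * that complete splitpoint 3; tiny requests are clamped to 2 above.
     */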
                                524                 : 
                                525             170 :     spare_index = _hash_spareindex(num_buckets);
                                526             170 :     Assert(spare_index < HASH_MAX_SPLITPOINTS);
                                527                 : 
 2224                           528             170 :     page = BufferGetPage(buf);
                                529             170 :     if (initpage)
                                530              21 :         _hash_pageinit(page, BufferGetPageSize(buf));
                                531                 : 
  373 michael                   532             170 :     pageopaque = HashPageGetOpaque(page);
 7160 tgl                       533             170 :     pageopaque->hasho_prevblkno = InvalidBlockNumber;
                                534             170 :     pageopaque->hasho_nextblkno = InvalidBlockNumber;
  646 peter                     535             170 :     pageopaque->hasho_bucket = InvalidBucket;
 7159 tgl                       536             170 :     pageopaque->hasho_flag = LH_META_PAGE;
 5844                           537             170 :     pageopaque->hasho_page_id = HASHO_PAGE_ID;
                                538                 : 
 2224 rhaas                     539             170 :     metap = HashPageGetMeta(page);
                                540                 : 
 9345 bruce                     541             170 :     metap->hashm_magic = HASH_MAGIC;
                                542             170 :     metap->hashm_version = HASH_VERSION;
 7160 tgl                       543             170 :     metap->hashm_ntuples = 0;
 9345 bruce                     544             170 :     metap->hashm_nmaps = 0;
 7157 tgl                       545             170 :     metap->hashm_ffactor = ffactor;
 2224 rhaas                     546             170 :     metap->hashm_bsize = HashGetMaxBitmapSize(page);
                                547                 : 
                                548                 :     /* find largest bitmap array size that will fit in page size */
 1096 drowley                   549             170 :     lshift = pg_leftmost_one_pos32(metap->hashm_bsize);
                                550             170 :     Assert(lshift > 0);
                                551             170 :     metap->hashm_bmsize = 1 << lshift;
                                552             170 :     metap->hashm_bmshift = lshift + BYTE_TO_BIT;
 7160 tgl                       553             170 :     Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
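    /*
     * Illustrative numbers (assuming the default 8 kB block size): with
     * hashm_bsize a bit under 8 kB, the largest power of 2 that fits is
     * 4096, so hashm_bmsize = 4096 bytes and hashm_bmshift = 12 + 3 = 15,
     * i.e. one bitmap page tracks 2^15 = 32768 overflow pages.
     */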
                                554                 : 
                                555                 :     /*
                                556                 :      * Label the index with its primary hash support function's OID.  This is
                                557                 :      * pretty useless for normal operation (in fact, hashm_procid is not used
                                558                 :      * anywhere), but it might be handy for forensic purposes so we keep it.
                                559                 :      */
 2224 rhaas                     560             170 :     metap->hashm_procid = procid;
                                561                 : 
                                562                 :     /*
                                563                 :      * We initialize the index with N buckets, 0 .. N-1, occupying physical
                                564                 :      * blocks 1 to N.  The first freespace bitmap page is in block N+1.
                                565                 :      */
 2197                           566             170 :     metap->hashm_maxbucket = num_buckets - 1;
                                567                 : 
                                568                 :     /*
                                569                 :      * Set highmask as next immediate ((2 ^ x) - 1), which should be
                                570                 :      * sufficient to cover num_buckets.
                                571                 :      */
 1096 drowley                   572             170 :     metap->hashm_highmask = pg_nextpower2_32(num_buckets + 1) - 1;
 2197 rhaas                     573             170 :     metap->hashm_lowmask = (metap->hashm_highmask >> 1);
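    /*
     * Illustrative example (not from the source): with num_buckets = 8,
     * pg_nextpower2_32(9) is 16, so hashm_highmask = 15 and
     * hashm_lowmask = 7.  Bucket numbers are computed as hash & highmask,
     * falling back to hash & lowmask when that exceeds hashm_maxbucket.
     */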
                                574                 : 
 6542 neilc                     575             170 :     MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
                                576             170 :     MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
                                577                 : 
                                578                 :     /* Set up mapping for one spare page after the initial splitpoints */
 2197 rhaas                     579             170 :     metap->hashm_spares[spare_index] = 1;
                                580             170 :     metap->hashm_ovflpoint = spare_index;
 7160 tgl                       581             170 :     metap->hashm_firstfree = 0;
                                582                 : 
                                583                 :     /*
                                584                 :      * Set pd_lower just past the end of the metadata.  This is essential,
                                585                 :      * because without doing so, metadata will be lost if xlog.c compresses
                                586                 :      * the page.
                                587                 :      */
 2224 rhaas                     588             170 :     ((PageHeader) page)->pd_lower =
                                589             170 :         ((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
 9770 scrappy                   590             170 : }
                                591                 : 
                                592                 : /*
                                593                 :  *  _hash_pageinit() -- Initialize a new hash index page.
                                594                 :  */
                                595                 : void
                                596            5373 : _hash_pageinit(Page page, Size size)
                                597                 : {
 9345 bruce                     598            5373 :     PageInit(page, size, sizeof(HashPageOpaqueData));
 9770 scrappy                   599            5373 : }
                                600                 : 
                                601                 : /*
                                602                 :  * Attempt to expand the hash table by creating one new bucket.
                                603                 :  *
                                604                 :  * This will silently do nothing if we don't get cleanup lock on old or
                                605                 :  * new bucket.
                                606                 :  *
                                607                 :  * Complete the pending splits and remove the tuples from old bucket,
                                608                 :  * if there are any left over from the previous split.
                                609                 :  *
                                610                 :  * The caller must hold a pin, but no lock, on the metapage buffer.
                                611                 :  * The buffer is returned in the same state.
                                612                 :  */
                                613                 : void
                                614             666 : _hash_expandtable(Relation rel, Buffer metabuf)
                                615                 : {
                                616                 :     HashMetaPage metap;
                                617                 :     Bucket      old_bucket;
                                618                 :     Bucket      new_bucket;
                                619                 :     uint32      spare_ndx;
                                620                 :     BlockNumber start_oblkno;
                                621                 :     BlockNumber start_nblkno;
                                622                 :     Buffer      buf_nblkno;
                                623                 :     Buffer      buf_oblkno;
                                624                 :     Page        opage;
                                625                 :     Page        npage;
                                626                 :     HashPageOpaque oopaque;
                                627                 :     HashPageOpaque nopaque;
                                628                 :     uint32      maxbucket;
                                629                 :     uint32      highmask;
                                630                 :     uint32      lowmask;
 2217 rhaas                     631             666 :     bool        metap_update_masks = false;
                                632             666 :     bool        metap_update_splitpoint = false;
                                633                 : 
 2321 rhaas                     634 UBC           0 : restart_expand:
                                635                 : 
                                636                 :     /*
                                637                 :      * Write-lock the meta page.  It used to be necessary to acquire a
                                638                 :      * heavyweight lock to begin a split, but that is no longer required.
                                639                 :      */
 2298 rhaas                     640 CBC         666 :     LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
                                641                 : 
 6363 tgl                       642             666 :     _hash_checkpage(rel, metabuf, LH_META_PAGE);
 2545 kgrittn                   643             666 :     metap = HashPageGetMeta(BufferGetPage(metabuf));
                                644                 : 
                                645                 :     /*
                                646                 :      * Check to see if split is still needed; someone else might have already
                                647                 :      * done one while we waited for the lock.
                                648                 :      *
                                649                 :      * Make sure this stays in sync with _hash_doinsert()
                                650                 :      */
 7157 tgl                       651             666 :     if (metap->hashm_ntuples <=
                                652             666 :         (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
 7157 tgl                       653 UBC           0 :         goto fail;
                                654                 : 
                                655                 :     /*
                                656                 :      * Can't split anymore if maxbucket has reached its maximum possible
                                657                 :      * value.
                                658                 :      *
                                659                 :      * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
                                660                 :      * the calculation maxbucket+1 mustn't overflow).  Currently we restrict
                                661                 :      * to half that to prevent failure of pg_ceil_log2_32() and insufficient
                                662                 :      * space in hashm_spares[].  It's moot anyway because an index with 2^32
                                663                 :      * buckets would certainly overflow BlockNumber and hence
                                664                 :      * _hash_alloc_buckets() would fail, but if we supported buckets smaller
                                665                 :      * than a disk block then this would be an independent constraint.
                                666                 :      *
                                667                 :      * If you change this, see also the maximum initial number of buckets in
                                668                 :      * _hash_init().
                                669                 :      */
 5985 tgl                       670 CBC         666 :     if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
 5985 tgl                       671 UBC           0 :         goto fail;
                                672                 : 
                                673                 :     /*
                                 674                 :      * Determine which bucket is to be split, and attempt to take a cleanup
                                 675                 :      * lock on the old bucket.  If we can't get the lock, give up.
                                676                 :      *
                                677                 :      * The cleanup lock protects us not only against other backends, but
                                678                 :      * against our own backend as well.
                                679                 :      *
                                680                 :      * The cleanup lock is mainly to protect the split from concurrent
                                681                 :      * inserts. See src/backend/access/hash/README, Lock Definitions for
                                682                 :      * further details.  Due to this locking restriction, if there is any
                                 683                 :      * pending scan, the split will give up, which is not good but harmless.
                                684                 :      */
 5834 tgl                       685 CBC         666 :     new_bucket = metap->hashm_maxbucket + 1;
                                686                 : 
 7160                           687             666 :     old_bucket = (new_bucket & metap->hashm_lowmask);
                                688                 : 
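As a hypothetical worked example of the two assignments above (values invented): with hashm_maxbucket = 5 and hashm_lowmask = 3, the bucket being created is 6 (binary 110) and the bucket it drains is 6 & 3 = 2 (binary 010); each new bucket takes its tuples from the existing bucket whose number differs only in the bit just above lowmask.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t    maxbucket = 5;                      /* buckets 0..5 already exist */
        uint32_t    lowmask = 3;                        /* mask from the previous doubling */
        uint32_t    new_bucket = maxbucket + 1;         /* 6, binary 110 */
        uint32_t    old_bucket = new_bucket & lowmask;  /* 2, binary 010 */

        printf("bucket %u is split to populate new bucket %u\n", old_bucket, new_bucket);
        return 0;
    }
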
 7157                           689             666 :     start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
                                690                 : 
 2321 rhaas                     691             666 :     buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
                                692             666 :     if (!buf_oblkno)
 7157 tgl                       693 UBC           0 :         goto fail;
                                694                 : 
 2321 rhaas                     695 CBC         666 :     opage = BufferGetPage(buf_oblkno);
  373 michael                   696             666 :     oopaque = HashPageGetOpaque(opage);
                                697                 : 
                                698                 :     /*
                                 699                 :      * We want to finish any pending split from the old bucket before starting
                                 700                 :      * a new one: there is no apparent benefit in deferring it, and finishing a
                                 701                 :      * split that involves multiple buckets (in case the new split also failed)
                                 702                 :      * would complicate the code.  We don't need to consider the new bucket for
                                 703                 :      * completing the split here, as a re-split of the new bucket cannot start
                                 704                 :      * while there is still a pending split from the old bucket.
                                705                 :      */
 2321 rhaas                     706             666 :     if (H_BUCKET_BEING_SPLIT(oopaque))
                                707                 :     {
                                708                 :         /*
                                 709                 :          * Copy bucket mapping info now; refer to the comment in code below where
                                710                 :          * we copy this information before calling _hash_splitbucket to see
                                711                 :          * why this is okay.
                                712                 :          */
 2321 rhaas                     713 UBC           0 :         maxbucket = metap->hashm_maxbucket;
                                714               0 :         highmask = metap->hashm_highmask;
                                715               0 :         lowmask = metap->hashm_lowmask;
                                716                 : 
                                717                 :         /*
                                718                 :          * Release the lock on metapage and old_bucket, before completing the
                                719                 :          * split.
                                720                 :          */
 2298                           721               0 :         LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
                                722               0 :         LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
                                723                 : 
 2321                           724               0 :         _hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
                                725                 :                            highmask, lowmask);
                                726                 : 
                                727                 :         /* release the pin on old buffer and retry for expand. */
                                728               0 :         _hash_dropbuf(rel, buf_oblkno);
                                729                 : 
                                730               0 :         goto restart_expand;
                                731                 :     }
                                732                 : 
                                733                 :     /*
                                 734                 :      * Clean up the tuples remaining from the previous split.  This operation
                                 735                 :      * requires a cleanup lock, and we already have one on the old bucket, so
                                 736                 :      * let's do it.  We also don't want to allow further splits from the
                                 737                 :      * bucket until the garbage of the previous split has been cleaned.  This
                                 738                 :      * has two advantages: first, it helps avoid bloat due to the garbage;
                                 739                 :      * and second, during cleanup of the bucket we can always be sure that
                                 740                 :      * the garbage tuples belong to the most recently split bucket.  By
                                 741                 :      * contrast, if we allowed cleanup of the bucket after the meta page has
                                 742                 :      * been updated to indicate the new split but before the actual split,
                                 743                 :      * the cleanup operation would be unable to decide whether a tuple has
                                 744                 :      * been moved to the newly created bucket and might end up deleting it.
                                745                 :      */
 2321 rhaas                     746 CBC         666 :     if (H_NEEDS_SPLIT_CLEANUP(oopaque))
                                747                 :     {
                                748                 :         /*
                                749                 :          * Copy bucket mapping info now; refer to the comment in code below
                                750                 :          * where we copy this information before calling _hash_splitbucket to
                                751                 :          * see why this is okay.
                                752                 :          */
 2316 rhaas                     753 UBC           0 :         maxbucket = metap->hashm_maxbucket;
                                754               0 :         highmask = metap->hashm_highmask;
                                755               0 :         lowmask = metap->hashm_lowmask;
                                756                 : 
                                757                 :         /* Release the metapage lock. */
 2298                           758               0 :         LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
                                759                 : 
 2321                           760               0 :         hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
                                761                 :                           maxbucket, highmask, lowmask, NULL, NULL, true,
                                762                 :                           NULL, NULL);
                                763                 : 
                                764               0 :         _hash_dropbuf(rel, buf_oblkno);
                                765                 : 
                                766               0 :         goto restart_expand;
                                767                 :     }
                                768                 : 
                                769                 :     /*
                                 770                 :      * There shouldn't be any active scan on the new bucket.
                                771                 :      *
                                772                 :      * Note: it is safe to compute the new bucket's blkno here, even though we
                                773                 :      * may still need to update the BUCKET_TO_BLKNO mapping.  This is because
                                774                 :      * the current value of hashm_spares[hashm_ovflpoint] correctly shows
                                775                 :      * where we are going to put a new splitpoint's worth of buckets.
                                776                 :      */
 5834 tgl                       777 CBC         666 :     start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
                                778                 : 
                                779                 :     /*
                                780                 :      * If the split point is increasing we need to allocate a new batch of
                                781                 :      * bucket pages.
                                782                 :      */
 2197 rhaas                     783             666 :     spare_ndx = _hash_spareindex(new_bucket + 1);
 5834 tgl                       784             666 :     if (spare_ndx > metap->hashm_ovflpoint)
                                785                 :     {
                                786                 :         uint32      buckets_to_add;
                                787                 : 
                                788              28 :         Assert(spare_ndx == metap->hashm_ovflpoint + 1);
                                789                 : 
                                790                 :         /*
                                791                 :          * We treat allocation of buckets as a separate WAL-logged action.
                                 792                 :          * Even if we fail after this operation, we won't leak bucket pages;
                                793                 :          * rather, the next split will consume this space. In any case, even
                                794                 :          * without failure we don't use all the space in one split operation.
                                795                 :          */
 2197 rhaas                     796              28 :         buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
                                797              28 :         if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
                                798                 :         {
                                799                 :             /* can't split due to BlockNumber overflow */
 2321 rhaas                     800 UBC           0 :             _hash_relbuf(rel, buf_oblkno);
 5834 tgl                       801               0 :             goto fail;
                                802                 :         }
                                803                 :     }
                                804                 : 
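A rough, self-contained sketch of the batch size computed in the block above, assuming the small-index regime where splitpoint n simply covers buckets 0 .. 2^n - 1 (for larger indexes hashutil.c divides each splitpoint group into several phases, which this toy calculation ignores); ceil_log2 is a local stand-in for pg_ceil_log2_32, and the numbers are invented.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for pg_ceil_log2_32. */
    static uint32_t
    ceil_log2(uint32_t n)
    {
        uint32_t    r = 0;

        while (((uint32_t) 1 << r) < n)
            r++;
        return r;
    }

    int
    main(void)
    {
        uint32_t    new_bucket = 4;                         /* first bucket of the new batch */
        uint32_t    spare_ndx = ceil_log2(new_bucket + 1);  /* 3 */
        uint32_t    buckets_to_add = ((uint32_t) 1 << spare_ndx) - new_bucket;  /* 8 - 4 = 4 */

        printf("splitpoint %u: pre-allocate %u bucket pages\n", spare_ndx, buckets_to_add);
        return 0;
    }
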
                                805                 :     /*
                                806                 :      * Physically allocate the new bucket's primary page.  We want to do this
                                807                 :      * before changing the metapage's mapping info, in case we can't get the
                                808                 :      * disk space.
                                809                 :      *
                                810                 :      * XXX It doesn't make sense to call _hash_getnewbuf first, zeroing the
                                811                 :      * buffer, and then only afterwards check whether we have a cleanup lock.
                                812                 :      * However, since no scan can be accessing the buffer yet, any concurrent
                                813                 :      * accesses will just be from processes like the bgwriter or checkpointer
                                814                 :      * which don't care about its contents, so it doesn't really matter.
                                815                 :      */
 2932 tgl                       816 CBC         666 :     buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
 2321 rhaas                     817             666 :     if (!IsBufferCleanupOK(buf_nblkno))
                                818                 :     {
 2321 rhaas                     819 UBC           0 :         _hash_relbuf(rel, buf_oblkno);
                                820               0 :         _hash_relbuf(rel, buf_nblkno);
                                821               0 :         goto fail;
                                822                 :     }
                                823                 : 
                                824                 :     /*
                                825                 :      * Since we are scribbling on the pages in the shared buffers, establish a
                                826                 :      * critical section.  Any failure in this next code leaves us with a big
                                827                 :      * problem: the metapage is effectively corrupt but could get written back
                                828                 :      * to disk.
                                829                 :      */
 6513 tgl                       830 CBC         666 :     START_CRIT_SECTION();
                                831                 : 
                                832                 :     /*
                                833                 :      * Okay to proceed with split.  Update the metapage bucket mapping info.
                                834                 :      */
 7157                           835             666 :     metap->hashm_maxbucket = new_bucket;
                                836                 : 
 7160                           837             666 :     if (new_bucket > metap->hashm_highmask)
                                838                 :     {
                                839                 :         /* Starting a new doubling */
                                840              10 :         metap->hashm_lowmask = metap->hashm_highmask;
                                841              10 :         metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
 2217 rhaas                     842              10 :         metap_update_masks = true;
                                843                 :     }
                                844                 : 
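A hypothetical illustration of starting a new doubling and of how the masks are then used to route a hash key to a bucket (the routing step follows the logic of _hash_hashkey2bucket in hashutil.c; all values are invented): with highmask = 7 and lowmask = 3, creating bucket 8 makes lowmask = 7 and highmask = 15; a key is masked with highmask first, and if that points at a bucket that does not exist yet, it is masked again with lowmask.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t    highmask = 7;
        uint32_t    lowmask = 3;
        uint32_t    new_bucket = 8;             /* first bucket of the next power of two */
        uint32_t    maxbucket = new_bucket;     /* buckets 0..8 now exist */
        uint32_t    hashkey = 0xDEADBEEF;
        uint32_t    bucket;

        if (new_bucket > highmask)
        {
            /* Starting a new doubling, as in the block above. */
            lowmask = highmask;                 /* 7  */
            highmask = new_bucket | lowmask;    /* 15 */
        }

        /* Route the key: try the wider mask, fall back if that bucket doesn't exist. */
        bucket = hashkey & highmask;
        if (bucket > maxbucket)
            bucket = bucket & lowmask;

        printf("key %#x -> bucket %u\n", hashkey, bucket);     /* bucket 7 */
        return 0;
    }
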
                                845                 :     /*
                                846                 :      * If the split point is increasing we need to adjust the hashm_spares[]
                                847                 :      * array and hashm_ovflpoint so that future overflow pages will be created
                                848                 :      * beyond this new batch of bucket pages.
                                849                 :      */
 7938 tgl                       850             666 :     if (spare_ndx > metap->hashm_ovflpoint)
                                851                 :     {
                                852              28 :         metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
                                853              28 :         metap->hashm_ovflpoint = spare_ndx;
 2217 rhaas                     854              28 :         metap_update_splitpoint = true;
                                855                 :     }
                                856                 : 
 2230                           857             666 :     MarkBufferDirty(metabuf);
                                858                 : 
                                859                 :     /*
                                860                 :      * Copy bucket mapping info now; this saves re-accessing the meta page
                                861                 :      * inside _hash_splitbucket's inner loop.  Note that once we drop the
                                862                 :      * split lock, other splits could begin, so these values might be out of
                                863                 :      * date before _hash_splitbucket finishes.  That's okay, since all it
                                864                 :      * needs is to tell which of these two buckets to map hashkeys into.
                                865                 :      */
 7157 tgl                       866             666 :     maxbucket = metap->hashm_maxbucket;
                                867             666 :     highmask = metap->hashm_highmask;
                                868             666 :     lowmask = metap->hashm_lowmask;
                                869                 : 
 2230 rhaas                     870             666 :     opage = BufferGetPage(buf_oblkno);
  373 michael                   871             666 :     oopaque = HashPageGetOpaque(opage);
                                872                 : 
                                873                 :     /*
                                874                 :      * Mark the old bucket to indicate that split is in progress.  (At
                                875                 :      * operation end, we will clear the split-in-progress flag.)  Also, for a
                                876                 :      * primary bucket page, hasho_prevblkno stores the number of buckets that
                                877                 :      * existed as of the last split, so we must update that value here.
                                878                 :      */
 2230 rhaas                     879             666 :     oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
                                880             666 :     oopaque->hasho_prevblkno = maxbucket;
                                881                 : 
                                882             666 :     MarkBufferDirty(buf_oblkno);
                                883                 : 
                                884             666 :     npage = BufferGetPage(buf_nblkno);
                                885                 : 
                                886                 :     /*
                                 887                 :      * Initialize the new bucket's primary page and mark it to indicate that
                                 888                 :      * a split is in progress.
                                889                 :      */
  373 michael                   890             666 :     nopaque = HashPageGetOpaque(npage);
 2230 rhaas                     891             666 :     nopaque->hasho_prevblkno = maxbucket;
                                892             666 :     nopaque->hasho_nextblkno = InvalidBlockNumber;
                                893             666 :     nopaque->hasho_bucket = new_bucket;
                                894             666 :     nopaque->hasho_flag = LH_BUCKET_PAGE | LH_BUCKET_BEING_POPULATED;
                                895             666 :     nopaque->hasho_page_id = HASHO_PAGE_ID;
                                896                 : 
                                897             666 :     MarkBufferDirty(buf_nblkno);
                                898                 : 
                                899                 :     /* XLOG stuff */
 2217                           900             666 :     if (RelationNeedsWAL(rel))
                                901                 :     {
                                902                 :         xl_hash_split_allocate_page xlrec;
                                903                 :         XLogRecPtr  recptr;
                                904                 : 
                                905             540 :         xlrec.new_bucket = maxbucket;
                                906             540 :         xlrec.old_bucket_flag = oopaque->hasho_flag;
                                907             540 :         xlrec.new_bucket_flag = nopaque->hasho_flag;
                                908             540 :         xlrec.flags = 0;
                                909                 : 
                                910             540 :         XLogBeginInsert();
                                911                 : 
                                912             540 :         XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
                                913             540 :         XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
                                914             540 :         XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);
                                915                 : 
                                916             540 :         if (metap_update_masks)
                                917                 :         {
                                918              10 :             xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
                                919              10 :             XLogRegisterBufData(2, (char *) &metap->hashm_lowmask, sizeof(uint32));
                                920              10 :             XLogRegisterBufData(2, (char *) &metap->hashm_highmask, sizeof(uint32));
                                921                 :         }
                                922                 : 
                                923             540 :         if (metap_update_splitpoint)
                                924                 :         {
                                925              24 :             xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
                                926              24 :             XLogRegisterBufData(2, (char *) &metap->hashm_ovflpoint,
                                927                 :                                 sizeof(uint32));
                                928              24 :             XLogRegisterBufData(2,
 2118 tgl                       929              24 :                                 (char *) &metap->hashm_spares[metap->hashm_ovflpoint],
                                930                 :                                 sizeof(uint32));
                                931                 :         }
                                932                 : 
 2217 rhaas                     933             540 :         XLogRegisterData((char *) &xlrec, SizeOfHashSplitAllocPage);
                                934                 : 
                                935             540 :         recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);
                                936                 : 
                                937             540 :         PageSetLSN(BufferGetPage(buf_oblkno), recptr);
                                938             540 :         PageSetLSN(BufferGetPage(buf_nblkno), recptr);
                                939             540 :         PageSetLSN(BufferGetPage(metabuf), recptr);
                                940                 :     }
                                941                 : 
 2230                           942             666 :     END_CRIT_SECTION();
                                943                 : 
                                944                 :     /* drop lock, but keep pin */
 2298                           945             666 :     LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
                                946                 : 
                                947                 :     /* Relocate records to the new bucket */
 2928 tgl                       948             666 :     _hash_splitbucket(rel, metabuf,
                                949                 :                       old_bucket, new_bucket,
                                950                 :                       buf_oblkno, buf_nblkno, NULL,
                                951                 :                       maxbucket, highmask, lowmask);
                                952                 : 
                                953                 :     /* all done, now release the pins on primary buckets. */
 2074 rhaas                     954             666 :     _hash_dropbuf(rel, buf_oblkno);
                                955             666 :     _hash_dropbuf(rel, buf_nblkno);
                                956                 : 
 7157 tgl                       957             666 :     return;
                                958                 : 
                                 959                 :     /* Here if we decide not to split or fail to acquire the old bucket lock */
 7157 tgl                       960 UBC           0 : fail:
                                961                 : 
                                 962                 :     /* We didn't write the metapage, so just drop the lock */
 2298 rhaas                     963               0 :     LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
                                964                 : }
                                965                 : 
                                966                 : 
                                967                 : /*
                                968                 :  * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
                                969                 :  *
                                970                 :  * This does not need to initialize the new bucket pages; we'll do that as
                                971                 :  * each one is used by _hash_expandtable().  But we have to extend the logical
                                972                 :  * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
                                973                 :  * sync with ours, so that we don't get complaints from smgr.
                                974                 :  *
                                975                 :  * We do this by writing a page of zeroes at the end of the splitpoint range.
                                976                 :  * We expect that the filesystem will ensure that the intervening pages read
                                977                 :  * as zeroes too.  On many filesystems this "hole" will not be allocated
                                978                 :  * immediately, which means that the index file may end up more fragmented
                                979                 :  * than if we forced it all to be allocated now; but since we don't scan
                                980                 :  * hash indexes sequentially anyway, that probably doesn't matter.
                                981                 :  *
                                982                 :  * XXX It's annoying that this code is executed with the metapage lock held.
                                983                 :  * We need to interlock against _hash_addovflpage() adding a new overflow page
                                984                 :  * concurrently, but it'd likely be better to use LockRelationForExtension
                                985                 :  * for the purpose.  OTOH, adding a splitpoint is a very infrequent operation,
                                986                 :  * so it may not be worth worrying about.
                                987                 :  *
                                988                 :  * Returns true if successful, or false if allocation failed due to
                                989                 :  * BlockNumber overflow.
                                990                 :  */
                                991                 : static bool
 5834 tgl                       992 CBC          28 : _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
                                993                 : {
                                994                 :     BlockNumber lastblock;
                                995                 :     PGIOAlignedBlock zerobuf;
                                996                 :     Page        page;
                                997                 :     HashPageOpaque ovflopaque;
                                998                 : 
 5985                           999              28 :     lastblock = firstblock + nblocks - 1;
                               1000                 : 
                               1001                 :     /*
                               1002                 :      * Check for overflow in block number calculation; if so, we cannot extend
                               1003                 :      * the index anymore.
                               1004                 :      */
                               1005              28 :     if (lastblock < firstblock || lastblock == InvalidBlockNumber)
 5834 tgl                      1006 UBC           0 :         return false;
                               1007                 : 
 1681 tgl                      1008 CBC          28 :     page = (Page) zerobuf.data;
                               1009                 : 
                               1010                 :     /*
                               1011                 :      * Initialize the page.  Just zeroing the page won't work; see
                               1012                 :      * _hash_freeovflpage for similar usage.  We take care to make the special
                               1013                 :      * space valid for the benefit of tools such as pageinspect.
                               1014                 :      */
 2217 rhaas                    1015              28 :     _hash_pageinit(page, BLCKSZ);
                               1016                 : 
  373 michael                  1017              28 :     ovflopaque = HashPageGetOpaque(page);
                               1018                 : 
 2195 rhaas                    1019              28 :     ovflopaque->hasho_prevblkno = InvalidBlockNumber;
                               1020              28 :     ovflopaque->hasho_nextblkno = InvalidBlockNumber;
  646 peter                    1021              28 :     ovflopaque->hasho_bucket = InvalidBucket;
 2195 rhaas                    1022              28 :     ovflopaque->hasho_flag = LH_UNUSED_PAGE;
                               1023              28 :     ovflopaque->hasho_page_id = HASHO_PAGE_ID;
                               1024                 : 
 2217                          1025              28 :     if (RelationNeedsWAL(rel))
  277 rhaas                    1026 GNC          24 :         log_newpage(&rel->rd_locator,
                               1027                 :                     MAIN_FORKNUM,
                               1028                 :                     lastblock,
                               1029                 :                     zerobuf.data,
                               1030                 :                     true);
                               1031                 : 
 1678 akapila                  1032 CBC          28 :     PageSetChecksumInplace(page, lastblock);
  636 tgl                      1033              28 :     smgrextend(RelationGetSmgr(rel), MAIN_FORKNUM, lastblock, zerobuf.data,
                               1034                 :                false);
                               1035                 : 
 5834                          1036              28 :     return true;
                               1037                 : }
                               1038                 : 
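A stand-alone sketch of the same hole-leaving trick using plain POSIX calls instead of smgr (the file name, block size, and numbers are hypothetical): the file is extended by writing a single block of zeroes at the last block of the new range, and the intervening blocks are left to the filesystem as an unallocated hole that still reads back as zeroes.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define BLKSZ 8192

    int
    main(void)
    {
        static char zeros[BLKSZ];       /* zero-filled, like zerobuf above */
        off_t       firstblock = 100;
        off_t       nblocks = 64;
        off_t       lastblock = firstblock + nblocks - 1;
        int         fd = open("hash_index_fork.tmp", O_WRONLY | O_CREAT, 0600);

        if (fd < 0)
            return 1;

        /* Write only the final block; firstblock..lastblock-1 remain a hole. */
        if (pwrite(fd, zeros, BLKSZ, lastblock * (off_t) BLKSZ) != BLKSZ)
        {
            close(fd);
            return 1;
        }
        close(fd);
        printf("file now extends through block %lld\n", (long long) lastblock);
        return 0;
    }
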
                               1039                 : 
                               1040                 : /*
                               1041                 :  * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
                               1042                 :  *
                                1043                 :  * This routine is used both to partition the tuples between the old and new
                                1044                 :  * buckets and to finish an incomplete split operation.  To finish a
                                1045                 :  * previously interrupted split, the caller needs to fill htab.  If htab is
                                1046                 :  * set, we skip moving the tuples that exist in htab; otherwise a NULL value
                                1047                 :  * of htab indicates that all the tuples that belong to the new bucket are to
                                1048                 :  * be moved.
                               1049                 :  *
                               1050                 :  * We are splitting a bucket that consists of a base bucket page and zero
                               1051                 :  * or more overflow (bucket chain) pages.  We must relocate tuples that
                               1052                 :  * belong in the new bucket.
                               1053                 :  *
                               1054                 :  * The caller must hold cleanup locks on both buckets to ensure that
                               1055                 :  * no one else is trying to access them (see README).
                               1056                 :  *
                               1057                 :  * The caller must hold a pin, but no lock, on the metapage buffer.
                               1058                 :  * The buffer is returned in the same state.  (The metapage is only
                               1059                 :  * touched if it becomes necessary to add or remove overflow pages.)
                               1060                 :  *
                                1061                 :  * The split needs to retain pins on the primary bucket pages of both the old
                                1062                 :  * and new buckets till the end of the operation.  This is to prevent vacuum
                                1063                 :  * from starting while a split is in progress.
                               1064                 :  *
                               1065                 :  * In addition, the caller must have created the new bucket's base page,
                               1066                 :  * which is passed in buffer nbuf, pinned and write-locked.  The lock will be
                                1067                 :  * released here and the pin must be released by the caller.  (The API is set up
                               1068                 :  * this way because we must do _hash_getnewbuf() before releasing the metapage
                               1069                 :  * write lock.  So instead of passing the new bucket's start block number, we
                               1070                 :  * pass an actual buffer.)
                               1071                 :  */
                               1072                 : static void
 7160                          1073             666 : _hash_splitbucket(Relation rel,
                               1074                 :                   Buffer metabuf,
                               1075                 :                   Bucket obucket,
                               1076                 :                   Bucket nbucket,
                               1077                 :                   Buffer obuf,
                               1078                 :                   Buffer nbuf,
                               1079                 :                   HTAB *htab,
                               1080                 :                   uint32 maxbucket,
                               1081                 :                   uint32 highmask,
                               1082                 :                   uint32 lowmask)
                               1083                 : {
                               1084                 :     Buffer      bucket_obuf;
                               1085                 :     Buffer      bucket_nbuf;
                               1086                 :     Page        opage;
                               1087                 :     Page        npage;
                               1088                 :     HashPageOpaque oopaque;
                               1089                 :     HashPageOpaque nopaque;
                               1090                 :     OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
                               1091                 :     IndexTuple  itups[MaxIndexTuplesPerPage];
 2217 rhaas                    1092             666 :     Size        all_tups_size = 0;
                               1093                 :     int         i;
                               1094             666 :     uint16      nitups = 0;
                               1095                 : 
 2321                          1096             666 :     bucket_obuf = obuf;
                               1097             666 :     opage = BufferGetPage(obuf);
  373 michael                  1098             666 :     oopaque = HashPageGetOpaque(opage);
                               1099                 : 
 2321 rhaas                    1100             666 :     bucket_nbuf = nbuf;
                               1101             666 :     npage = BufferGetPage(nbuf);
  373 michael                  1102             666 :     nopaque = HashPageGetOpaque(npage);
                               1103                 : 
                               1104                 :     /* Copy the predicate locks from old bucket to new bucket. */
 1828 teodor                   1105             666 :     PredicateLockPageSplit(rel,
                               1106                 :                            BufferGetBlockNumber(bucket_obuf),
                               1107                 :                            BufferGetBlockNumber(bucket_nbuf));
                               1108                 : 
                               1109                 :     /*
                               1110                 :      * Partition the tuples in the old bucket between the old bucket and the
                               1111                 :      * new bucket, advancing along the old bucket's overflow bucket chain and
                                1112                 :      * adding overflow pages to the new bucket as needed.  The outer loop
                                1113                 :      * iterates once per page in the old bucket.
                               1114                 :      */
                               1115                 :     for (;;)
 9345 bruce                    1116             169 :     {
                               1117                 :         BlockNumber oblkno;
                               1118                 :         OffsetNumber ooffnum;
                               1119                 :         OffsetNumber omaxoffnum;
                               1120                 : 
                               1121                 :         /* Scan each tuple in old page */
 4907 tgl                      1122             835 :         omaxoffnum = PageGetMaxOffsetNumber(opage);
                               1123             835 :         for (ooffnum = FirstOffsetNumber;
                               1124          153446 :              ooffnum <= omaxoffnum;
                               1125          152611 :              ooffnum = OffsetNumberNext(ooffnum))
                               1126                 :         {
                               1127                 :             IndexTuple  itup;
                               1128                 :             Size        itemsz;
                               1129                 :             Bucket      bucket;
 2321 rhaas                    1130          152611 :             bool        found = false;
                               1131                 : 
                               1132                 :             /* skip dead tuples */
 2343                          1133          152611 :             if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
 2343 rhaas                    1134 UBC           0 :                 continue;
                               1135                 : 
                               1136                 :             /*
                                1137                 :              * Before inserting a tuple, probe the hash table containing TIDs
                                1138                 :              * of tuples belonging to the new bucket; if we find a match, skip
                                1139                 :              * that tuple.  Otherwise, fetch the item's hash key (conveniently
                                1140                 :              * stored in the item) and determine which bucket it now belongs
                                1141                 :              * in.
                               1142                 :              */
 4907 tgl                      1143 CBC      152611 :             itup = (IndexTuple) PageGetItem(opage,
                               1144                 :                                             PageGetItemId(opage, ooffnum));
                               1145                 : 
 2321 rhaas                    1146          152611 :             if (htab)
 2321 rhaas                    1147 UBC           0 :                 (void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);
                               1148                 : 
 2321 rhaas                    1149 CBC      152611 :             if (found)
 2321 rhaas                    1150 UBC           0 :                 continue;
                               1151                 : 
 4907 tgl                      1152 CBC      152611 :             bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
                               1153                 :                                           maxbucket, highmask, lowmask);
                               1154                 : 
                               1155          152611 :             if (bucket == nbucket)
                               1156                 :             {
                               1157                 :                 IndexTuple  new_itup;
                               1158                 : 
                               1159                 :                 /*
                                1160                 :                  * Make a copy of the index tuple, as we have to scribble on it.
                               1161                 :                  */
 2321 rhaas                    1162           62483 :                 new_itup = CopyIndexTuple(itup);
                               1163                 : 
                               1164                 :                 /*
                                1165                 :                  * Mark the index tuple as moved by split; such tuples are
                                1166                 :                  * skipped by scans if a split is in progress for the bucket.
                               1167                 :                  */
                               1168           62483 :                 new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;
                               1169                 : 
                               1170                 :                 /*
                                1171                 :                  * Insert the tuple into the new bucket.  If it doesn't fit on
                               1172                 :                  * the current page in the new bucket, we must allocate a new
                               1173                 :                  * overflow page and place the tuple on that page instead.
                               1174                 :                  */
 1866 tgl                      1175           62483 :                 itemsz = IndexTupleSize(new_itup);
 4907                          1176           62483 :                 itemsz = MAXALIGN(itemsz);
                               1177                 : 
 2217 rhaas                    1178           62483 :                 if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
                               1179                 :                 {
                               1180                 :                     /*
                                1181                 :                      * Change the shared buffer state in a critical section;
                                1182                 :                      * otherwise any error could make it unrecoverable.
                               1183                 :                      */
                               1184              39 :                     START_CRIT_SECTION();
                               1185                 : 
                               1186              39 :                     _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
 2298                          1187              39 :                     MarkBufferDirty(nbuf);
                               1188                 :                     /* log the split operation before releasing the lock */
 2217                          1189              39 :                     log_split_page(rel, nbuf);
                               1190                 : 
                               1191              39 :                     END_CRIT_SECTION();
                               1192                 : 
                               1193                 :                     /* drop lock, but keep pin */
 2298                          1194              39 :                     LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
                               1195                 : 
                               1196                 :                     /* be tidy */
 2217                          1197           15912 :                     for (i = 0; i < nitups; i++)
                               1198           15873 :                         pfree(itups[i]);
                               1199              39 :                     nitups = 0;
                               1200              39 :                     all_tups_size = 0;
                               1201                 : 
                               1202                 :                     /* chain to a new overflow page */
  578 michael                  1203              39 :                     nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf));
 2545 kgrittn                  1204              39 :                     npage = BufferGetPage(nbuf);
  373 michael                  1205              39 :                     nopaque = HashPageGetOpaque(npage);
                               1206                 :                 }
                               1207                 : 
 2217 rhaas                    1208           62483 :                 itups[nitups++] = new_itup;
                               1209           62483 :                 all_tups_size += itemsz;
                               1210                 :             }
                               1211                 :             else
                               1212                 :             {
                               1213                 :                 /*
                               1214                 :                  * the tuple stays on this page, so nothing to do.
                               1215                 :                  */
 4907 tgl                      1216           90128 :                 Assert(bucket == obucket);
                               1217                 :             }
                               1218                 :         }
                               1219                 : 
                               1220             835 :         oblkno = oopaque->hasho_nextblkno;
                               1221                 : 
                               1222                 :         /* retain the pin on the old primary bucket */
 2321 rhaas                    1223             835 :         if (obuf == bucket_obuf)
 2298                          1224             666 :             LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
                               1225                 :         else
 4907 tgl                      1226             169 :             _hash_relbuf(rel, obuf);
                               1227                 : 
                               1228                 :         /* Exit loop if no more overflow pages in old bucket */
                               1229             835 :         if (!BlockNumberIsValid(oblkno))
                               1230                 :         {
                               1231                 :             /*
                                1232                 :              * Change the shared buffer state in a critical section; otherwise
                                1233                 :              * any error could make it unrecoverable.
                               1234                 :              */
 2217 rhaas                    1235             666 :             START_CRIT_SECTION();
                               1236                 : 
                               1237             666 :             _hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
 2230                          1238             666 :             MarkBufferDirty(nbuf);
                               1239                 :             /* log the split operation before releasing the lock */
 2217                          1240             666 :             log_split_page(rel, nbuf);
                               1241                 : 
                               1242             666 :             END_CRIT_SECTION();
                               1243                 : 
 2230                          1244             666 :             if (nbuf == bucket_nbuf)
                               1245             663 :                 LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
                               1246                 :             else
                               1247               3 :                 _hash_relbuf(rel, nbuf);
                               1248                 : 
                               1249                 :             /* be tidy */
 2217                          1250           47276 :             for (i = 0; i < nitups; i++)
                               1251           46610 :                 pfree(itups[i]);
 4907 tgl                      1252             666 :             break;
                               1253                 :         }
                               1254                 : 
                               1255                 :         /* Else, advance to next old page */
 2321 rhaas                    1256             169 :         obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
 2545 kgrittn                  1257             169 :         opage = BufferGetPage(obuf);
  373 michael                  1258             169 :         oopaque = HashPageGetOpaque(opage);
                               1259                 :     }
                               1260                 : 
                               1261                 :     /*
                               1262                 :      * We're at the end of the old bucket chain, so we're done partitioning
                               1263                 :      * the tuples.  Mark the old and new buckets to indicate split is
                               1264                 :      * finished.
                               1265                 :      *
                               1266                 :      * To avoid deadlocks due to locking order of buckets, first lock the old
                               1267                 :      * bucket and then the new bucket.
                               1268                 :      */
 2298 rhaas                    1269             666 :     LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
 2321                          1270             666 :     opage = BufferGetPage(bucket_obuf);
  373 michael                  1271             666 :     oopaque = HashPageGetOpaque(opage);
                               1272                 : 
 2298 rhaas                    1273             666 :     LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
 2321                          1274             666 :     npage = BufferGetPage(bucket_nbuf);
  373 michael                  1275             666 :     nopaque = HashPageGetOpaque(npage);
                               1276                 : 
 2217 rhaas                    1277             666 :     START_CRIT_SECTION();
                               1278                 : 
 2321                          1279             666 :     oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
                               1280             666 :     nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;
                               1281                 : 
                               1282                 :     /*
                               1283                 :      * After the split is finished, mark the old bucket to indicate that it
                               1284                 :      * contains deletable tuples.  We will clear the split-cleanup flag after
                               1285                 :      * deleting such tuples, either at the end of the split, at the next split
                               1286                 :      * from the old bucket, or at the time of vacuum.
                               1287                 :      */
                               1288             666 :     oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;
                               1289                 : 
                               1290                 :     /*
                               1291                 :      * Now write the buffers.  We don't release the locks here; the caller
                               1292                 :      * is responsible for releasing them.
                               1293                 :      */
                               1294             666 :     MarkBufferDirty(bucket_obuf);
                               1295             666 :     MarkBufferDirty(bucket_nbuf);
                               1296                 : 
 2217                          1297             666 :     if (RelationNeedsWAL(rel))
                               1298                 :     {
                               1299                 :         XLogRecPtr  recptr;
                               1300                 :         xl_hash_split_complete xlrec;
                               1301                 : 
                               1302             540 :         xlrec.old_bucket_flag = oopaque->hasho_flag;
                               1303             540 :         xlrec.new_bucket_flag = nopaque->hasho_flag;
                               1304                 : 
                               1305             540 :         XLogBeginInsert();
                               1306                 : 
                               1307             540 :         XLogRegisterData((char *) &xlrec, SizeOfHashSplitComplete);
                               1308                 : 
                               1309             540 :         XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
                               1310             540 :         XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);
                               1311                 : 
                               1312             540 :         recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);
                               1313                 : 
                               1314             540 :         PageSetLSN(BufferGetPage(bucket_obuf), recptr);
                               1315             540 :         PageSetLSN(BufferGetPage(bucket_nbuf), recptr);
                               1316                 :     }
                               1317                 : 
                               1318             666 :     END_CRIT_SECTION();
                               1319                 : 
                               1320                 :     /*
                               1321                 :      * If possible, clean up the old bucket.  We might not be able to do this
                               1322                 :      * if someone else has a pin on it, but if not then we can go ahead.  This
                               1323                 :      * isn't absolutely necessary, but it reduces bloat; if we don't do it
                               1324                 :      * now, VACUUM will do it eventually, but maybe not until new overflow
                               1325                 :      * pages have been allocated.  Note that there's no need to clean up the
                               1326                 :      * new bucket.
                               1327                 :      */
 2074                          1328             666 :     if (IsBufferCleanupOK(bucket_obuf))
                               1329                 :     {
                               1330             666 :         LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
                               1331             666 :         hashbucketcleanup(rel, obucket, bucket_obuf,
                               1332                 :                           BufferGetBlockNumber(bucket_obuf), NULL,
                               1333                 :                           maxbucket, highmask, lowmask, NULL, NULL, true,
                               1334                 :                           NULL, NULL);
                               1335                 :     }
                               1336                 :     else
                               1337                 :     {
 2074 rhaas                    1338 UBC           0 :         LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
                               1339               0 :         LockBuffer(bucket_obuf, BUFFER_LOCK_UNLOCK);
                               1340                 :     }
 2321 rhaas                    1341 CBC         666 : }
                               1342                 : 
                               1343                 : /*
                               1344                 :  *  _hash_finish_split() -- Finish the previously interrupted split operation
                               1345                 :  *
                               1346                 :  * To complete the split operation, we build a hash table of the TIDs already
                               1347                 :  * present in the new bucket; the split code then uses it to skip tuples that
                               1348                 :  * were moved before the split was interrupted.
                               1349                 :  *
                               1350                 :  * The caller must hold a pin, but no lock, on the metapage and old bucket's
                               1351                 :  * primary page buffer.  The buffers are returned in the same state.  (The
                               1352                 :  * metapage is only touched if it becomes necessary to add or remove overflow
                               1353                 :  * pages.)
                               1354                 :  */
                               1355                 : void
 2321 rhaas                    1356 UBC           0 : _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
                               1357                 :                    uint32 maxbucket, uint32 highmask, uint32 lowmask)
                               1358                 : {
                               1359                 :     HASHCTL     hash_ctl;
                               1360                 :     HTAB       *tidhtab;
                               1361               0 :     Buffer      bucket_nbuf = InvalidBuffer;
                               1362                 :     Buffer      nbuf;
                               1363                 :     Page        npage;
                               1364                 :     BlockNumber nblkno;
                               1365                 :     BlockNumber bucket_nblkno;
                               1366                 :     HashPageOpaque npageopaque;
                               1367                 :     Bucket      nbucket;
                               1368                 :     bool        found;
                               1369                 : 
                               1370                 :     /* Initialize hash tables used to track TIDs */
                               1371               0 :     hash_ctl.keysize = sizeof(ItemPointerData);
                               1372               0 :     hash_ctl.entrysize = sizeof(ItemPointerData);
                               1373               0 :     hash_ctl.hcxt = CurrentMemoryContext;
                               1374                 : 
                               1375                 :     tidhtab =
                               1376               0 :         hash_create("bucket ctids",
                               1377                 :                     256,        /* arbitrary initial size */
                               1378                 :                     &hash_ctl,
                               1379                 :                     HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
                               1380                 : 
                               1381               0 :     bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
                               1382                 : 
                               1383                 :     /*
                               1384                 :      * Scan the new bucket and build hash table of TIDs
                               1385                 :      */
                               1386                 :     for (;;)
                               1387               0 :     {
                               1388                 :         OffsetNumber noffnum;
                               1389                 :         OffsetNumber nmaxoffnum;
                               1390                 : 
                               1391               0 :         nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
                               1392                 :                             LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
                               1393                 : 
                               1394                 :         /* Remember the primary bucket buffer so we can later acquire a cleanup lock on it. */
                               1395               0 :         if (nblkno == bucket_nblkno)
                               1396               0 :             bucket_nbuf = nbuf;
                               1397                 : 
                               1398               0 :         npage = BufferGetPage(nbuf);
  373 michael                  1399               0 :         npageopaque = HashPageGetOpaque(npage);
                               1400                 : 
                               1401                 :         /* Scan each tuple in new page */
 2321 rhaas                    1402               0 :         nmaxoffnum = PageGetMaxOffsetNumber(npage);
                               1403               0 :         for (noffnum = FirstOffsetNumber;
                               1404               0 :              noffnum <= nmaxoffnum;
                               1405               0 :              noffnum = OffsetNumberNext(noffnum))
                               1406                 :         {
                               1407                 :             IndexTuple  itup;
                               1408                 : 
                               1409                 :             /* Fetch the item's TID and insert it in hash table. */
                               1410               0 :             itup = (IndexTuple) PageGetItem(npage,
                               1411                 :                                             PageGetItemId(npage, noffnum));
                               1412                 : 
                               1413               0 :             (void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
                               1414                 : 
                               1415               0 :             Assert(!found);
                               1416                 :         }
                               1417                 : 
                               1418               0 :         nblkno = npageopaque->hasho_nextblkno;
                               1419                 : 
                               1420                 :         /*
                               1421                 :          * Release our lock without modifying the buffer, taking care to
                               1422                 :          * retain the pin on the primary bucket's page.
                               1423                 :          */
                               1424               0 :         if (nbuf == bucket_nbuf)
 2298                          1425               0 :             LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
                               1426                 :         else
 2321                          1427               0 :             _hash_relbuf(rel, nbuf);
                               1428                 : 
                               1429                 :         /* Exit loop if no more overflow pages in new bucket */
                               1430               0 :         if (!BlockNumberIsValid(nblkno))
                               1431               0 :             break;
                               1432                 :     }
                               1433                 : 
                               1434                 :     /*
                               1435                 :      * Conditionally get the cleanup lock on old and new buckets to perform
                               1436                 :      * the split operation.  If we can't get the cleanup locks, silently give
                               1437                 :      * up; the next insertion into the old bucket will try again to complete
                               1438                 :      * the split.
                               1439                 :      */
                               1440               0 :     if (!ConditionalLockBufferForCleanup(obuf))
                               1441                 :     {
                               1442               0 :         hash_destroy(tidhtab);
                               1443               0 :         return;
                               1444                 :     }
                               1445               0 :     if (!ConditionalLockBufferForCleanup(bucket_nbuf))
                               1446                 :     {
 2298                          1447               0 :         LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
 2321                          1448               0 :         hash_destroy(tidhtab);
                               1449               0 :         return;
                               1450                 :     }
                               1451                 : 
                               1452               0 :     npage = BufferGetPage(bucket_nbuf);
  373 michael                  1453               0 :     npageopaque = HashPageGetOpaque(npage);
 2321 rhaas                    1454               0 :     nbucket = npageopaque->hasho_bucket;
                               1455                 : 
 2230                          1456               0 :     _hash_splitbucket(rel, metabuf, obucket,
                               1457                 :                       nbucket, obuf, bucket_nbuf, tidhtab,
                               1458                 :                       maxbucket, highmask, lowmask);
                               1459                 : 
 2074                          1460               0 :     _hash_dropbuf(rel, bucket_nbuf);
 2321                          1461               0 :     hash_destroy(tidhtab);
                               1462                 : }
                               1463                 : 
                               1464                 : /*
                               1465                 :  *  log_split_page() -- Log the split operation
                               1466                 :  *
                               1467                 :  *  We log the split operation when the new page in the new bucket gets
                               1468                 :  *  full, so we log the entire page.
                               1469                 :  *
                               1470                 :  *  'buf' must be locked by the caller which is also responsible for unlocking
                               1471                 :  *  it.
                               1472                 :  */
                               1473                 : static void
 2217 rhaas                    1474 CBC         705 : log_split_page(Relation rel, Buffer buf)
                               1475                 : {
                               1476             705 :     if (RelationNeedsWAL(rel))
                               1477                 :     {
                               1478                 :         XLogRecPtr  recptr;
                               1479                 : 
                               1480             579 :         XLogBeginInsert();
                               1481                 : 
                               1482             579 :         XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);
                               1483                 : 
                               1484             579 :         recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE);
                               1485                 : 
                               1486             579 :         PageSetLSN(BufferGetPage(buf), recptr);
                               1487                 :     }
                               1488             705 : }
                               1489                 : 
                               1490                 : /*
                               1491                 :  *  _hash_getcachedmetap() -- Returns cached metapage data.
                               1492                 :  *
                               1493                 :  *  If metabuf is not InvalidBuffer, caller must hold a pin, but no lock, on
                               1494                 :  *  the metapage.  If *metabuf is InvalidBuffer, we'll set it before returning
                               1495                 :  *  if we have to refresh the cache, and return with a pin but no lock on it;
                               1496                 :  *  the caller is responsible for releasing the pin.
                               1497                 :  *
                               1498                 :  *  We refresh the cache if it's not initialized yet or force_refresh is true.
                               1499                 :  */
                               1500                 : HashMetaPage
 2252                          1501          346116 : _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
                               1502                 : {
                               1503                 :     Page        page;
                               1504                 : 
                               1505          346116 :     Assert(metabuf);
                               1506          346116 :     if (force_refresh || rel->rd_amcache == NULL)
                               1507                 :     {
 2153 bruce                    1508             546 :         char       *cache = NULL;
                               1509                 : 
                               1510                 :         /*
                               1511                 :          * It's important that we don't set rd_amcache to an invalid value.
                               1512                 :          * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
                               1513                 :          * install a pointer to the newly-allocated storage in the actual
                               1514                 :          * relcache entry until both have succeeded.
                               1515                 :          */
 2252 rhaas                    1516             546 :         if (rel->rd_amcache == NULL)
                               1517             245 :             cache = MemoryContextAlloc(rel->rd_indexcxt,
                               1518                 :                                        sizeof(HashMetaPageData));
                               1519                 : 
                               1520                 :         /* Read the metapage. */
                               1521             546 :         if (BufferIsValid(*metabuf))
 2252 rhaas                    1522 UBC           0 :             LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
                               1523                 :         else
 2252 rhaas                    1524 CBC         546 :             *metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
                               1525                 :                                     LH_META_PAGE);
                               1526             546 :         page = BufferGetPage(*metabuf);
                               1527                 : 
                               1528                 :         /* Populate the cache. */
                               1529             546 :         if (rel->rd_amcache == NULL)
                               1530             245 :             rel->rd_amcache = cache;
                               1531             546 :         memcpy(rel->rd_amcache, HashPageGetMeta(page),
                               1532                 :                sizeof(HashMetaPageData));
                               1533                 : 
                               1534                 :         /* Release metapage lock, but keep the pin. */
                               1535             546 :         LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
                               1536                 :     }
                               1537                 : 
                               1538          346116 :     return (HashMetaPage) rel->rd_amcache;
                               1539                 : }
                               1540                 : 
                               1541                 : /*
                               1542                 :  *  _hash_getbucketbuf_from_hashkey() -- Get the bucket's buffer for the given
                               1543                 :  *                                       hashkey.
                               1544                 :  *
                               1545                 :  *  Bucket pages do not move or get removed once they are allocated.  This
                               1546                 :  *  gives us an opportunity to use the previously saved metapage contents to
                               1547                 :  *  reach the target bucket buffer, instead of reading from the metapage every
                               1548                 :  *  time.  This saves one buffer access each time we want to reach the target
                               1549                 :  *  bucket buffer, a very helpful saving in bufmgr traffic and contention.
                               1550                 :  *
                               1551                 :  *  The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
                               1552                 :  *  bucket buffer has to be locked for reading or writing.
                               1553                 :  *
                               1554                 :  *  The out parameter cachedmetap is set with metapage contents used for
                               1555                 :  *  hashkey to bucket buffer mapping. Some callers need this info to reach the
                               1556                 :  *  old bucket in case of bucket split, see _hash_doinsert().
                               1557                 :  */
                               1558                 : Buffer
                               1559          345803 : _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access,
                               1560                 :                                 HashMetaPage *cachedmetap)
                               1561                 : {
                               1562                 :     HashMetaPage metap;
                               1563                 :     Buffer      buf;
                               1564          345803 :     Buffer      metabuf = InvalidBuffer;
                               1565                 :     Page        page;
                               1566                 :     Bucket      bucket;
                               1567                 :     BlockNumber blkno;
                               1568                 :     HashPageOpaque opaque;
                               1569                 : 
                               1570                 :     /* We read from the target bucket buffer, so a lock is required. */
                               1571          345803 :     Assert(access == HASH_READ || access == HASH_WRITE);
                               1572                 : 
                               1573          345803 :     metap = _hash_getcachedmetap(rel, &metabuf, false);
                               1574          345803 :     Assert(metap != NULL);
                               1575                 : 
                               1576                 :     /*
                               1577                 :      * Loop until we get a lock on the correct target bucket.
                               1578                 :      */
                               1579                 :     for (;;)
                               1580                 :     {
                               1581                 :         /*
                               1582                 :          * Compute the target bucket number, and convert to block number.
                               1583                 :          */
                               1584          346104 :         bucket = _hash_hashkey2bucket(hashkey,
                               1585                 :                                       metap->hashm_maxbucket,
                               1586                 :                                       metap->hashm_highmask,
                               1587                 :                                       metap->hashm_lowmask);
                               1588                 : 
                               1589          346104 :         blkno = BUCKET_TO_BLKNO(metap, bucket);
                               1590                 : 
                               1591                 :         /* Fetch the primary bucket page for the bucket */
                               1592          346104 :         buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
                               1593          346104 :         page = BufferGetPage(buf);
  373 michael                  1594          346104 :         opaque = HashPageGetOpaque(page);
 2252 rhaas                    1595          346104 :         Assert(opaque->hasho_bucket == bucket);
 2161                          1596          346104 :         Assert(opaque->hasho_prevblkno != InvalidBlockNumber);
                               1597                 : 
                               1598                 :         /*
                               1599                 :          * If this bucket hasn't been split, we're done.
                               1600                 :          */
                               1601          346104 :         if (opaque->hasho_prevblkno <= metap->hashm_maxbucket)
 2252                          1602          345803 :             break;
                               1603                 : 
                               1604                 :         /* Drop lock on this buffer, update cached metapage, and retry. */
                               1605             301 :         _hash_relbuf(rel, buf);
                               1606             301 :         metap = _hash_getcachedmetap(rel, &metabuf, true);
                               1607             301 :         Assert(metap != NULL);
                               1608                 :     }
                               1609                 : 
                               1610          345803 :     if (BufferIsValid(metabuf))
                               1611             540 :         _hash_dropbuf(rel, metabuf);
                               1612                 : 
                               1613          345803 :     if (cachedmetap)
                               1614          345510 :         *cachedmetap = metap;
                               1615                 : 
                               1616          345803 :     return buf;
                               1617                 : }
        

Generated by: LCOV version v1.16-55-g56c0a2a