LCOV - differential code coverage report

Current view:   top level - src/backend/executor - nodeHash.c  (source / functions)
Current:        Differential Code Coverage HEAD vs 15
Current Date:   2023-04-08 15:15:32
Baseline:       15
Baseline Date:  2023-04-08 15:09:40

Coverage summary (differential categories UNC LBC UIC UBC GBC GIC GNC CBC EUB ECB DCB):
  Lines:     95.6 %  (1118 of 1170 hit)   UNC 5, LBC 13, UIC 29, UBC 5, GBC 19,
                                          GIC 604, GNC 87, CBC 408, EUB 28, ECB 667, DCB 30
  Functions: 98.2 %  (54 of 55 hit)       1  46  4  4  47  1

           TLA  Line data    Source code
       1                 : /*-------------------------------------------------------------------------
       2                 :  *
       3                 :  * nodeHash.c
       4                 :  *    Routines to hash relations for hashjoin
       5                 :  *
       6                 :  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
       7                 :  * Portions Copyright (c) 1994, Regents of the University of California
       8                 :  *
       9                 :  *
      10                 :  * IDENTIFICATION
      11                 :  *    src/backend/executor/nodeHash.c
      12                 :  *
      13                 :  * See note on parallelism in nodeHashjoin.c.
      14                 :  *
      15                 :  *-------------------------------------------------------------------------
      16                 :  */
      17                 : /*
      18                 :  * INTERFACE ROUTINES
      19                 :  *      MultiExecHash   - generate an in-memory hash table of the relation
      20                 :  *      ExecInitHash    - initialize node and subnodes
      21                 :  *      ExecEndHash     - shutdown node and subnodes
      22                 :  */
      23                 : 
      24                 : #include "postgres.h"
      25                 : 
      26                 : #include <math.h>
      27                 : #include <limits.h>
      28                 : 
      29                 : #include "access/htup_details.h"
      30                 : #include "access/parallel.h"
      31                 : #include "catalog/pg_statistic.h"
      32                 : #include "commands/tablespace.h"
      33                 : #include "executor/execdebug.h"
      34                 : #include "executor/hashjoin.h"
      35                 : #include "executor/nodeHash.h"
      36                 : #include "executor/nodeHashjoin.h"
      37                 : #include "miscadmin.h"
      38                 : #include "pgstat.h"
      39                 : #include "port/atomics.h"
      40                 : #include "port/pg_bitutils.h"
      41                 : #include "utils/dynahash.h"
      42                 : #include "utils/guc.h"
      43                 : #include "utils/lsyscache.h"
      44                 : #include "utils/memutils.h"
      45                 : #include "utils/syscache.h"
      46                 : 
      47                 : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
      48                 : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
      49                 : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
      50                 : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
      51                 : static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
      52                 :                                   int mcvsToUse);
      53                 : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
      54                 :                                     TupleTableSlot *slot,
      55                 :                                     uint32 hashvalue,
      56                 :                                     int bucketNumber);
      57                 : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
      58                 : 
      59                 : static void *dense_alloc(HashJoinTable hashtable, Size size);
      60                 : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
      61                 :                                                 size_t size,
      62                 :                                                 dsa_pointer *shared);
      63                 : static void MultiExecPrivateHash(HashState *node);
      64                 : static void MultiExecParallelHash(HashState *node);
      65                 : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
      66                 :                                                        int bucketno);
      67                 : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
      68                 :                                                       HashJoinTuple tuple);
      69                 : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
      70                 :                                              HashJoinTuple tuple,
      71                 :                                              dsa_pointer tuple_shared);
      72                 : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
      73                 : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
      74                 : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
      75                 : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
      76                 : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
      77                 :                                                      dsa_pointer *shared);
      78                 : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
      79                 :                                           int batchno,
      80                 :                                           size_t size);
      81                 : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
      82                 : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
      83                 : 
      84                 : 
      85                 : /* ----------------------------------------------------------------
      86                 :  *      ExecHash
      87                 :  *
      88                 :  *      stub for pro forma compliance
      89                 :  * ----------------------------------------------------------------
      90                 :  */
      91                 : static TupleTableSlot *
      92 UBC           0 : ExecHash(PlanState *pstate)
      93                 : {
      94               0 :     elog(ERROR, "Hash node does not support ExecProcNode call convention");
      95                 :     return NULL;
      96                 : }
      97                 : 
      98                 : /* ----------------------------------------------------------------
      99                 :  *      MultiExecHash
     100                 :  *
     101                 :  *      build hash table for hashjoin, doing partitioning if more
     102                 :  *      than one batch is required.
     103                 :  * ----------------------------------------------------------------
     104                 :  */
     105                 : Node *
     106 CBC        9886 : MultiExecHash(HashState *node)
     107                 : {
     108                 :     /* must provide our own instrumentation support */
     109            9886 :     if (node->ps.instrument)
     110             164 :         InstrStartNode(node->ps.instrument);
     111                 : 
     112            9886 :     if (node->parallel_state != NULL)
     113             189 :         MultiExecParallelHash(node);
     114                 :     else
     115            9697 :         MultiExecPrivateHash(node);
     116                 : 
     117                 :     /* must provide our own instrumentation support */
     118            9886 :     if (node->ps.instrument)
     119             164 :         InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
     120                 : 
     121                 :     /*
     122                 :      * We do not return the hash table directly because it's not a subtype of
     123                 :      * Node, and so would violate the MultiExecProcNode API.  Instead, our
     124                 :      * parent Hashjoin node is expected to know how to fish it out of our node
     125                 :      * state.  Ugly but not really worth cleaning up, since Hashjoin knows
     126                 :      * quite a bit more about Hash besides that.
     127                 :      */
     128            9886 :     return NULL;
     129                 : }
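
The contract described in the comment above is easiest to see from the
caller's side.  A minimal sketch (hypothetical fragment, modeled on
nodeHashjoin.c, where the Hash node is the join's inner subplan; hjstate and
hashtable are assumed locals):

    HashState  *hashNode = (HashState *) innerPlanState(hjstate);

    /* build the table; the return value is deliberately NULL */
    (void) MultiExecProcNode((PlanState *) hashNode);

    /* ... so fish the result out of the child's node state instead */
    hashtable = hashNode->hashtable;
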
     130                 : 
     131                 : /* ----------------------------------------------------------------
     132                 :  *      MultiExecPrivateHash
     133                 :  *
     134                 :  *      parallel-oblivious version, building a backend-private
     135                 :  *      hash table and (if necessary) batch files.
     136                 :  * ----------------------------------------------------------------
     137                 :  */
     138                 : static void
     139            9697 : MultiExecPrivateHash(HashState *node)
     140                 : {
     141                 :     PlanState  *outerNode;
     142                 :     List       *hashkeys;
     143                 :     HashJoinTable hashtable;
     144                 :     TupleTableSlot *slot;
     145                 :     ExprContext *econtext;
     146                 :     uint32      hashvalue;
     147                 : 
     148                 :     /*
     149                 :      * get state info from node
     150                 :      */
     151            9697 :     outerNode = outerPlanState(node);
     152            9697 :     hashtable = node->hashtable;
     153                 : 
     154                 :     /*
     155                 :      * set expression context
     156                 :      */
     157            9697 :     hashkeys = node->hashkeys;
     158            9697 :     econtext = node->ps.ps_ExprContext;
     159                 : 
     160                 :     /*
     161                 :      * Get all tuples from the node below the Hash node and insert into the
     162                 :      * hash table (or temp files).
     163                 :      */
     164                 :     for (;;)
     165                 :     {
     166         5343452 :         slot = ExecProcNode(outerNode);
     167         5343452 :         if (TupIsNull(slot))
     168                 :             break;
     169                 :         /* We have to compute the hash value */
     170         5333755 :         econtext->ecxt_outertuple = slot;
     171         5333755 :         if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
     172         5333755 :                                  false, hashtable->keepNulls,
     173                 :                                  &hashvalue))
     174                 :         {
     175                 :             int         bucketNumber;
     176                 : 
     177         5333749 :             bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
     178         5333749 :             if (bucketNumber != INVALID_SKEW_BUCKET_NO)
     179                 :             {
     180                 :                 /* It's a skew tuple, so put it into that hash table */
     181             294 :                 ExecHashSkewTableInsert(hashtable, slot, hashvalue,
     182                 :                                         bucketNumber);
     183             294 :                 hashtable->skewTuples += 1;
     184                 :             }
     185                 :             else
     186                 :             {
     187                 :                 /* Not subject to skew optimization, so insert normally */
     188         5333455 :                 ExecHashTableInsert(hashtable, slot, hashvalue);
     189                 :             }
     190         5333749 :             hashtable->totalTuples += 1;
     191                 :         }
     192                 :     }
     193                 : 
     194                 :     /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
     195            9697 :     if (hashtable->nbuckets != hashtable->nbuckets_optimal)
     196              59 :         ExecHashIncreaseNumBuckets(hashtable);
     197                 : 
     198                 :     /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
     199            9697 :     hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
     200            9697 :     if (hashtable->spaceUsed > hashtable->spacePeak)
     201            9675 :         hashtable->spacePeak = hashtable->spaceUsed;
     202                 : 
     203            9697 :     hashtable->partialTuples = hashtable->totalTuples;
     204            9697 : }
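
To put the bucket-array accounting just above into numbers (a back-of-envelope
with assumed values, not figures from this build; HashJoinTuple is a pointer,
8 bytes on a 64-bit build):

    spaceUsed += nbuckets * sizeof(HashJoinTuple)
                 = 1048576 * 8 = 8388608 bytes (8 MB) for a 2^20-bucket table
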
     205                 : 
     206                 : /* ----------------------------------------------------------------
     207                 :  *      MultiExecParallelHash
     208                 :  *
     209                 :  *      parallel-aware version, building a shared hash table and
     210                 :  *      (if necessary) batch files using the combined effort of
     211                 :  *      a set of co-operating backends.
     212                 :  * ----------------------------------------------------------------
     213                 :  */
     214                 : static void
     215             189 : MultiExecParallelHash(HashState *node)
     216                 : {
     217                 :     ParallelHashJoinState *pstate;
     218                 :     PlanState  *outerNode;
     219                 :     List       *hashkeys;
     220                 :     HashJoinTable hashtable;
     221                 :     TupleTableSlot *slot;
     222                 :     ExprContext *econtext;
     223                 :     uint32      hashvalue;
     224                 :     Barrier    *build_barrier;
     225                 :     int         i;
     226                 : 
     227                 :     /*
     228                 :      * get state info from node
     229                 :      */
     230             189 :     outerNode = outerPlanState(node);
     231             189 :     hashtable = node->hashtable;
     232                 : 
     233                 :     /*
     234                 :      * set expression context
     235                 :      */
     236             189 :     hashkeys = node->hashkeys;
     237             189 :     econtext = node->ps.ps_ExprContext;
     238                 : 
     239                 :     /*
     240                 :      * Synchronize the parallel hash table build.  At this stage we know that
     241                 :      * the shared hash table has been or is being set up by
     242                 :      * ExecHashTableCreate(), but we don't know if our peers have returned
     243                 :      * from there or are here in MultiExecParallelHash(), and if so how far
      244                 :      * through they are.  To find out, we check the build_barrier phase and
      245                 :      * then jump to the right step in the build algorithm.
     246                 :      */
     247             189 :     pstate = hashtable->parallel_state;
     248             189 :     build_barrier = &pstate->build_barrier;
     249 GNC         189 :     Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
     250 CBC         189 :     switch (BarrierPhase(build_barrier))
     251                 :     {
     252 GNC          90 :         case PHJ_BUILD_ALLOCATE:
     253                 : 
     254                 :             /*
     255                 :              * Either I just allocated the initial hash table in
     256                 :              * ExecHashTableCreate(), or someone else is doing that.  Either
     257                 :              * way, wait for everyone to arrive here so we can proceed.
     258                 :              */
     259 CBC          90 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
     260                 :             /* Fall through. */
     261                 : 
     262 GNC         136 :         case PHJ_BUILD_HASH_INNER:
     263                 : 
     264                 :             /*
     265                 :              * It's time to begin hashing, or if we just arrived here then
     266                 :              * hashing is already underway, so join in that effort.  While
     267                 :              * hashing we have to be prepared to help increase the number of
     268                 :              * batches or buckets at any time, and if we arrived here when
     269                 :              * that was already underway we'll have to help complete that work
     270                 :              * immediately so that it's safe to access batches and buckets
     271                 :              * below.
     272                 :              */
     273 CBC         136 :             if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
     274                 :                 PHJ_GROW_BATCHES_ELECT)
     275               1 :                 ExecParallelHashIncreaseNumBatches(hashtable);
     276             136 :             if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
     277                 :                 PHJ_GROW_BUCKETS_ELECT)
     278 UBC           0 :                 ExecParallelHashIncreaseNumBuckets(hashtable);
     279 CBC         136 :             ExecParallelHashEnsureBatchAccessors(hashtable);
     280             136 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
     281                 :             for (;;)
     282                 :             {
     283         1080196 :                 slot = ExecProcNode(outerNode);
     284         1080196 :                 if (TupIsNull(slot))
     285                 :                     break;
     286         1080060 :                 econtext->ecxt_outertuple = slot;
     287         1080060 :                 if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
     288         1080060 :                                          false, hashtable->keepNulls,
     289                 :                                          &hashvalue))
     290         1080060 :                     ExecParallelHashTableInsert(hashtable, slot, hashvalue);
     291         1080060 :                 hashtable->partialTuples++;
     292                 :             }
     293                 : 
     294                 :             /*
     295                 :              * Make sure that any tuples we wrote to disk are visible to
     296                 :              * others before anyone tries to load them.
     297                 :              */
     298             779 :             for (i = 0; i < hashtable->nbatch; ++i)
     299             643 :                 sts_end_write(hashtable->batches[i].inner_tuples);
     300                 : 
     301                 :             /*
     302                 :              * Update shared counters.  We need an accurate total tuple count
     303                 :              * to control the empty table optimization.
     304                 :              */
     305             136 :             ExecParallelHashMergeCounters(hashtable);
     306                 : 
     307             136 :             BarrierDetach(&pstate->grow_buckets_barrier);
     308             136 :             BarrierDetach(&pstate->grow_batches_barrier);
     309                 : 
     310                 :             /*
     311                 :              * Wait for everyone to finish building and flushing files and
     312                 :              * counters.
     313                 :              */
     314             136 :             if (BarrierArriveAndWait(build_barrier,
     315                 :                                      WAIT_EVENT_HASH_BUILD_HASH_INNER))
     316                 :             {
     317                 :                 /*
     318                 :                  * Elect one backend to disable any further growth.  Batches
     319                 :                  * are now fixed.  While building them we made sure they'd fit
     320                 :                  * in our memory budget when we load them back in later (or we
     321                 :                  * tried to do that and gave up because we detected extreme
     322                 :                  * skew).
     323                 :                  */
     324              81 :                 pstate->growth = PHJ_GROWTH_DISABLED;
     325                 :             }
     326                 :     }
     327                 : 
     328                 :     /*
     329                 :      * We're not yet attached to a batch.  We all agree on the dimensions and
     330                 :      * number of inner tuples (for the empty table optimization).
     331                 :      */
     332             189 :     hashtable->curbatch = -1;
     333             189 :     hashtable->nbuckets = pstate->nbuckets;
     334             189 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
     335             189 :     hashtable->totalTuples = pstate->total_tuples;
     336                 : 
     337                 :     /*
     338                 :      * Unless we're completely done and the batch state has been freed, make
     339                 :      * sure we have accessors.
     340                 :      */
     341 GNC         189 :     if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
     342 CBC         189 :         ExecParallelHashEnsureBatchAccessors(hashtable);
     343                 : 
     344                 :     /*
     345                 :      * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
     346                 :      * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
     347                 :      * there already).
     348                 :      */
     349 GNC         189 :     Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
     350                 :            BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
     351                 :            BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
     352 CBC         189 : }
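
The switch above is an instance of a general barrier phase-machine idiom:
attach, read the phase to see how far the group has progressed, jump in at
that step, and fall through the later phases.  A stripped-down sketch of the
shape (PHASE_ELECT, PHASE_WORK, wait_event, and the do_* helpers are
placeholders, not names from this file):

    BarrierAttach(barrier);
    switch (BarrierPhase(barrier))
    {
        case PHASE_ELECT:
            /* exactly one participant sees true and does the serial setup */
            if (BarrierArriveAndWait(barrier, wait_event))
                do_serial_setup();
            /* fall through */
        case PHASE_WORK:
            do_parallel_work();
            break;
    }
    BarrierDetach(barrier);
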
     353                 : 
     354                 : /* ----------------------------------------------------------------
     355                 :  *      ExecInitHash
     356                 :  *
     357                 :  *      Init routine for Hash node
     358                 :  * ----------------------------------------------------------------
     359                 :  */
     360                 : HashState *
     361           14214 : ExecInitHash(Hash *node, EState *estate, int eflags)
     362                 : {
     363                 :     HashState  *hashstate;
     364                 : 
     365                 :     /* check for unsupported flags */
     366           14214 :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
     367                 : 
     368                 :     /*
     369                 :      * create state structure
     370                 :      */
     371           14214 :     hashstate = makeNode(HashState);
     372           14214 :     hashstate->ps.plan = (Plan *) node;
     373           14214 :     hashstate->ps.state = estate;
     374           14214 :     hashstate->ps.ExecProcNode = ExecHash;
     375           14214 :     hashstate->hashtable = NULL;
     376           14214 :     hashstate->hashkeys = NIL;   /* will be set by parent HashJoin */
     377                 : 
     378                 :     /*
     379                 :      * Miscellaneous initialization
     380                 :      *
     381                 :      * create expression context for node
     382                 :      */
     383           14214 :     ExecAssignExprContext(estate, &hashstate->ps);
     384                 : 
     385                 :     /*
     386                 :      * initialize child nodes
     387                 :      */
     388           14214 :     outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
     389                 : 
     390                 :     /*
      391                 :      * initialize our result slot and type. No need to build a projection
     392                 :      * because this node doesn't do projections.
     393                 :      */
     394           14214 :     ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
     395           14214 :     hashstate->ps.ps_ProjInfo = NULL;
     396                 : 
     397                 :     /*
     398                 :      * initialize child expressions
     399                 :      */
     400           14214 :     Assert(node->plan.qual == NIL);
     401           14214 :     hashstate->hashkeys =
     402           14214 :         ExecInitExprList(node->hashkeys, (PlanState *) hashstate);
     403                 : 
     404           14214 :     return hashstate;
     405                 : }
     406                 : 
      407                 : /* ----------------------------------------------------------------
     408                 :  *      ExecEndHash
     409                 :  *
     410                 :  *      clean up routine for Hash node
     411                 :  * ----------------------------------------------------------------
     412                 :  */
     413                 : void
     414           14172 : ExecEndHash(HashState *node)
     415                 : {
     416                 :     PlanState  *outerPlan;
     417                 : 
     418                 :     /*
     419                 :      * free exprcontext
     420                 :      */
     421           14172 :     ExecFreeExprContext(&node->ps);
     422                 : 
     423                 :     /*
     424                 :      * shut down the subplan
     425                 :      */
     426           14172 :     outerPlan = outerPlanState(node);
     427           14172 :     ExecEndNode(outerPlan);
     428           14172 : }
     429                 : 
     430                 : 
     431                 : /* ----------------------------------------------------------------
     432                 :  *      ExecHashTableCreate
     433                 :  *
     434                 :  *      create an empty hashtable data structure for hashjoin.
     435                 :  * ----------------------------------------------------------------
     436                 :  */
     437                 : HashJoinTable
     438            9886 : ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, bool keepNulls)
     439                 : {
     440                 :     Hash       *node;
     441                 :     HashJoinTable hashtable;
     442                 :     Plan       *outerNode;
     443                 :     size_t      space_allowed;
     444                 :     int         nbuckets;
     445                 :     int         nbatch;
     446                 :     double      rows;
     447                 :     int         num_skew_mcvs;
     448                 :     int         log2_nbuckets;
     449                 :     int         nkeys;
     450                 :     int         i;
     451                 :     ListCell   *ho;
     452                 :     ListCell   *hc;
     453                 :     MemoryContext oldcxt;
     454                 : 
     455                 :     /*
     456                 :      * Get information about the size of the relation to be hashed (it's the
     457                 :      * "outer" subtree of this node, but the inner relation of the hashjoin).
     458                 :      * Compute the appropriate size of the hash table.
     459                 :      */
     460            9886 :     node = (Hash *) state->ps.plan;
     461            9886 :     outerNode = outerPlan(node);
     462                 : 
     463                 :     /*
      464                 :      * If this is a shared hash table with a partial plan, then we can't use
     465                 :      * outerNode->plan_rows to estimate its size.  We need an estimate of the
     466                 :      * total number of rows across all copies of the partial plan.
     467                 :      */
     468            9886 :     rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
     469                 : 
     470            9697 :     ExecChooseHashTableSize(rows, outerNode->plan_width,
     471            9886 :                             OidIsValid(node->skewTable),
     472            9886 :                             state->parallel_state != NULL,
     473            9886 :                             state->parallel_state != NULL ?
     474             189 :                             state->parallel_state->nparticipants - 1 : 0,
     475                 :                             &space_allowed,
     476                 :                             &nbuckets, &nbatch, &num_skew_mcvs);
     477                 : 
     478                 :     /* nbuckets must be a power of 2 */
     479            9886 :     log2_nbuckets = my_log2(nbuckets);
     480            9886 :     Assert(nbuckets == (1 << log2_nbuckets));
     481                 : 
     482                 :     /*
     483                 :      * Initialize the hash table control block.
     484                 :      *
     485                 :      * The hashtable control block is just palloc'd from the executor's
     486                 :      * per-query memory context.  Everything else should be kept inside the
     487                 :      * subsidiary hashCxt or batchCxt.
     488                 :      */
     489 GNC        9886 :     hashtable = palloc_object(HashJoinTableData);
     490 CBC        9886 :     hashtable->nbuckets = nbuckets;
     491            9886 :     hashtable->nbuckets_original = nbuckets;
     492            9886 :     hashtable->nbuckets_optimal = nbuckets;
     493            9886 :     hashtable->log2_nbuckets = log2_nbuckets;
     494            9886 :     hashtable->log2_nbuckets_optimal = log2_nbuckets;
     495            9886 :     hashtable->buckets.unshared = NULL;
     496            9886 :     hashtable->keepNulls = keepNulls;
     497            9886 :     hashtable->skewEnabled = false;
     498            9886 :     hashtable->skewBucket = NULL;
     499            9886 :     hashtable->skewBucketLen = 0;
     500            9886 :     hashtable->nSkewBuckets = 0;
     501            9886 :     hashtable->skewBucketNums = NULL;
     502            9886 :     hashtable->nbatch = nbatch;
     503            9886 :     hashtable->curbatch = 0;
     504            9886 :     hashtable->nbatch_original = nbatch;
     505            9886 :     hashtable->nbatch_outstart = nbatch;
     506            9886 :     hashtable->growEnabled = true;
     507            9886 :     hashtable->totalTuples = 0;
     508            9886 :     hashtable->partialTuples = 0;
     509            9886 :     hashtable->skewTuples = 0;
     510            9886 :     hashtable->innerBatchFile = NULL;
     511            9886 :     hashtable->outerBatchFile = NULL;
     512            9886 :     hashtable->spaceUsed = 0;
     513            9886 :     hashtable->spacePeak = 0;
     514            9886 :     hashtable->spaceAllowed = space_allowed;
     515            9886 :     hashtable->spaceUsedSkew = 0;
     516            9886 :     hashtable->spaceAllowedSkew =
     517            9886 :         hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
     518            9886 :     hashtable->chunks = NULL;
     519            9886 :     hashtable->current_chunk = NULL;
     520            9886 :     hashtable->parallel_state = state->parallel_state;
     521            9886 :     hashtable->area = state->ps.state->es_query_dsa;
     522            9886 :     hashtable->batches = NULL;
     523                 : 
     524                 : #ifdef HJDEBUG
     525                 :     printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
     526                 :            hashtable, nbatch, nbuckets);
     527                 : #endif
     528                 : 
     529                 :     /*
     530                 :      * Create temporary memory contexts in which to keep the hashtable working
     531                 :      * storage.  See notes in executor/hashjoin.h.
     532                 :      */
     533            9886 :     hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
     534                 :                                                "HashTableContext",
     535                 :                                                ALLOCSET_DEFAULT_SIZES);
     536                 : 
     537            9886 :     hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
     538                 :                                                 "HashBatchContext",
     539                 :                                                 ALLOCSET_DEFAULT_SIZES);
     540                 : 
     541                 :     /* Allocate data that will live for the life of the hashjoin */
     542                 : 
     543            9886 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     544                 : 
     545                 :     /*
     546                 :      * Get info about the hash functions to be used for each hash key. Also
     547                 :      * remember whether the join operators are strict.
     548                 :      */
     549            9886 :     nkeys = list_length(hashOperators);
     550 GNC        9886 :     hashtable->outer_hashfunctions = palloc_array(FmgrInfo, nkeys);
     551            9886 :     hashtable->inner_hashfunctions = palloc_array(FmgrInfo, nkeys);
     552            9886 :     hashtable->hashStrict = palloc_array(bool, nkeys);
     553            9886 :     hashtable->collations = palloc_array(Oid, nkeys);
     554 GIC        9886 :     i = 0;
     555 CBC       20427 :     forboth(ho, hashOperators, hc, hashCollations)
     556                 :     {
     557 GIC       10541 :         Oid         hashop = lfirst_oid(ho);
     558                 :         Oid         left_hashfn;
     559 ECB             :         Oid         right_hashfn;
     560 EUB             : 
     561 GIC       10541 :         if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
     562 LBC           0 :             elog(ERROR, "could not find hash function for hash operator %u",
     563 ECB             :                  hashop);
     564 CBC       10541 :         fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
     565           10541 :         fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
     566           10541 :         hashtable->hashStrict[i] = op_strict(hashop);
     567 GIC       10541 :         hashtable->collations[i] = lfirst_oid(hc);
     568           10541 :         i++;
     569 ECB             :     }
     570                 : 
     571 GIC        9886 :     if (nbatch > 1 && hashtable->parallel_state == NULL)
     572                 :     {
     573                 :         /*
     574                 :          * allocate and initialize the file arrays in hashCxt (not needed for
     575 ECB             :          * parallel case which uses shared tuplestores instead of raw files)
     576                 :          */
     577 GNC          60 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
     578              60 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     579                 :         /* The files will not be opened until needed... */
     580 ECB             :         /* ... but make sure we have temp tablespaces established for them */
     581 GIC          60 :         PrepareTempTablespaces();
     582 ECB             :     }
     583                 : 
     584 CBC        9886 :     MemoryContextSwitchTo(oldcxt);
     585                 : 
     586 GIC        9886 :     if (hashtable->parallel_state)
     587                 :     {
     588             189 :         ParallelHashJoinState *pstate = hashtable->parallel_state;
     589                 :         Barrier    *build_barrier;
     590                 : 
     591                 :         /*
     592                 :          * Attach to the build barrier.  The corresponding detach operation is
     593                 :          * in ExecHashTableDetach.  Note that we won't attach to the
     594                 :          * batch_barrier for batch 0 yet.  We'll attach later and start it out
     595                 :          * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
     596                 :          * then loaded while hashing (the standard hybrid hash join
     597                 :          * algorithm), and we'll coordinate that using build_barrier.
     598                 :          */
     599             189 :         build_barrier = &pstate->build_barrier;
     600             189 :         BarrierAttach(build_barrier);
     601                 : 
     602                 :         /*
     603                 :          * So far we have no idea whether there are any other participants,
     604                 :          * and if so, what phase they are working on.  The only thing we care
     605 ECB             :          * about at this point is whether someone has already created the
     606                 :          * SharedHashJoinBatch objects and the hash table for batch 0.  One
     607                 :          * backend will be elected to do that now if necessary.
     608                 :          */
     609 GNC         270 :         if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
     610 CBC          81 :             BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
     611                 :         {
     612 GIC          81 :             pstate->nbatch = nbatch;
     613 CBC          81 :             pstate->space_allowed = space_allowed;
     614 GIC          81 :             pstate->growth = PHJ_GROWTH_OK;
     615                 : 
     616                 :             /* Set up the shared state for coordinating batches. */
     617              81 :             ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
     618                 : 
     619 ECB             :             /*
     620                 :              * Allocate batch 0's hash table up front so we can load it
     621                 :              * directly while hashing.
     622                 :              */
     623 GIC          81 :             pstate->nbuckets = nbuckets;
     624              81 :             ExecParallelHashTableAlloc(hashtable, 0);
     625                 :         }
     626                 : 
     627                 :         /*
     628                 :          * The next Parallel Hash synchronization point is in
     629                 :          * MultiExecParallelHash(), which will progress it all the way to
     630                 :          * PHJ_BUILD_RUN.  The caller must not return control from this
     631                 :          * executor node between now and then.
     632                 :          */
     633                 :     }
     634                 :     else
     635                 :     {
     636 ECB             :         /*
     637                 :          * Prepare context for the first-scan space allocations; allocate the
     638                 :          * hashbucket array therein, and set each bucket "empty".
     639                 :          */
     640 GIC        9697 :         MemoryContextSwitchTo(hashtable->batchCxt);
     641                 : 
     642 GNC        9697 :         hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
     643                 : 
     644 ECB             :         /*
     645                 :          * Set up for skew optimization, if possible and there's a need for
     646                 :          * more than one batch.  (In a one-batch join, there's no point in
     647                 :          * it.)
     648                 :          */
     649 GIC        9697 :         if (nbatch > 1)
     650 CBC          60 :             ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
     651                 : 
     652 GIC        9697 :         MemoryContextSwitchTo(oldcxt);
     653                 :     }
     654                 : 
     655            9886 :     return hashtable;
     656                 : }
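
For scale, the skew budget established above (illustrative arithmetic;
assumes SKEW_HASH_MEM_PERCENT is 2, its value in executor/hashjoin.h, and an
8 MB space allowance):

    spaceAllowedSkew = 8388608 * 2 / 100 = 167772 bytes (~164 kB)
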
     657                 : 
     658                 : 
     659                 : /*
     660                 :  * Compute appropriate size for hashtable given the estimated size of the
     661                 :  * relation to be hashed (number of rows and average row width).
     662                 :  *
     663                 :  * This is exported so that the planner's costsize.c can use it.
     664                 :  */
     665 ECB             : 
     666                 : /* Target bucket loading (tuples per bucket) */
     667                 : #define NTUP_PER_BUCKET         1
     668                 : 
     669                 : void
     670 GIC      236685 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
     671                 :                         bool try_combined_hash_mem,
     672                 :                         int parallel_workers,
     673                 :                         size_t *space_allowed,
     674                 :                         int *numbuckets,
     675                 :                         int *numbatches,
     676                 :                         int *num_skew_mcvs)
     677                 : {
     678 ECB             :     int         tupsize;
     679                 :     double      inner_rel_bytes;
     680                 :     size_t      hash_table_bytes;
     681                 :     size_t      bucket_bytes;
     682                 :     size_t      max_pointers;
     683 CBC      236685 :     int         nbatch = 1;
     684 ECB             :     int         nbuckets;
     685                 :     double      dbuckets;
     686                 : 
     687                 :     /* Force a plausible relation size if no info */
     688 GIC      236685 :     if (ntuples <= 0.0)
     689              75 :         ntuples = 1000.0;
     690                 : 
     691 ECB             :     /*
     692                 :      * Estimate tupsize based on footprint of tuple in hashtable... note this
     693                 :      * does not allow for any palloc overhead.  The manipulations of spaceUsed
     694                 :      * don't count palloc overhead either.
     695                 :      */
     696 GIC      236685 :     tupsize = HJTUPLE_OVERHEAD +
     697          236685 :         MAXALIGN(SizeofMinimalTupleHeader) +
     698          236685 :         MAXALIGN(tupwidth);
     699 CBC      236685 :     inner_rel_bytes = ntuples * tupsize;
     700                 : 
     701                 :     /*
     702                 :      * Compute in-memory hashtable size limit from GUCs.
     703                 :      */
     704 GIC      236685 :     hash_table_bytes = get_hash_memory_limit();
     705                 : 
     706 ECB             :     /*
     707                 :      * Parallel Hash tries to use the combined hash_mem of all workers to
     708                 :      * avoid the need to batch.  If that won't work, it falls back to hash_mem
     709                 :      * per worker and tries to process batches in parallel.
     710                 :      */
     711 CBC      236685 :     if (try_combined_hash_mem)
     712 ECB             :     {
     713                 :         /* Careful, this could overflow size_t */
     714                 :         double      newlimit;
     715                 : 
     716 CBC        5778 :         newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
     717 GIC        5778 :         newlimit = Min(newlimit, (double) SIZE_MAX);
     718            5778 :         hash_table_bytes = (size_t) newlimit;
     719                 :     }
     720                 : 
     721          236685 :     *space_allowed = hash_table_bytes;
     722                 : 
     723                 :     /*
     724                 :      * If skew optimization is possible, estimate the number of skew buckets
     725                 :      * that will fit in the memory allowed, and decrement the assumed space
     726                 :      * available for the main hash table accordingly.
     727                 :      *
     728                 :      * We make the optimistic assumption that each skew bucket will contain
     729                 :      * one inner-relation tuple.  If that turns out to be low, we will recover
     730                 :      * at runtime by reducing the number of skew buckets.
     731                 :      *
     732 ECB             :      * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
     733                 :      * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
     734                 :      * will round up to the next power of 2 and then multiply by 4 to reduce
     735                 :      * collisions.
     736                 :      */
     737 GIC      236685 :     if (useskew)
     738                 :     {
     739                 :         size_t      bytes_per_mcv;
     740                 :         size_t      skew_mcvs;
     741                 : 
     742                 :         /*----------
     743                 :          * Compute number of MCVs we could hold in hash_table_bytes
     744                 :          *
     745                 :          * Divisor is:
     746                 :          * size of a hash tuple +
     747 ECB             :          * worst-case size of skewBucket[] per MCV +
     748                 :          * size of skewBucketNums[] entry +
     749                 :          * size of skew bucket struct itself
     750                 :          *----------
     751                 :          */
     752 GIC      234854 :         bytes_per_mcv = tupsize +
     753                 :             (8 * sizeof(HashSkewBucket *)) +
     754          234854 :             sizeof(int) +
     755                 :             SKEW_BUCKET_OVERHEAD;
     756          234854 :         skew_mcvs = hash_table_bytes / bytes_per_mcv;
     757 ECB             : 
     758                 :         /*
     759                 :          * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
     760                 :          * not to worry about size_t overflow in the multiplication)
     761                 :          */
     762 CBC      234854 :         skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
     763                 : 
     764                 :         /* Now clamp to integer range */
     765          234854 :         skew_mcvs = Min(skew_mcvs, INT_MAX);
     766 ECB             : 
     767 GIC      234854 :         *num_skew_mcvs = (int) skew_mcvs;
     768                 : 
     769 ECB             :         /* Reduce hash_table_bytes by the amount needed for the skew table */
     770 GIC      234854 :         if (skew_mcvs > 0)
     771          234854 :             hash_table_bytes -= skew_mcvs * bytes_per_mcv;
     772                 :     }
     773                 :     else
     774            1831 :         *num_skew_mcvs = 0;
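
    /*
     * A worked instance of the skew arithmetic above, with assumed inputs
     * (tupsize = 72, hash_table_bytes = 8 MB, SKEW_BUCKET_OVERHEAD = 16,
     * SKEW_HASH_MEM_PERCENT = 2):
     *
     *    bytes_per_mcv = 72 + 8 * 8 + 4 + 16 = 156
     *    skew_mcvs     = 8388608 / 156       = 53773
     *    skew_mcvs     = 53773 * 2 / 100     = 1075
     *
     * so roughly 1075 * 156 = 167700 bytes come off the main table's budget.
     */
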
     775                 : 
     776                 :     /*
     777                 :      * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
     778                 :      * memory is filled, assuming a single batch; but limit the value so that
     779                 :      * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
     780 ECB             :      * nor MaxAllocSize.
     781                 :      *
     782                 :      * Note that both nbuckets and nbatch must be powers of 2 to make
     783                 :      * ExecHashGetBucketAndBatch fast.
     784                 :      */
     785 GIC      236685 :     max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
     786          236685 :     max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
     787 ECB             :     /* If max_pointers isn't a power of 2, must round it down to one */
     788 GIC      236685 :     max_pointers = pg_prevpower2_size_t(max_pointers);
     789 ECB             : 
     790                 :     /* Also ensure we avoid integer overflow in nbatch and nbuckets */
     791                 :     /* (this step is redundant given the current value of MaxAllocSize) */
     792 GIC      236685 :     max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
     793 ECB             : 
     794 GIC      236685 :     dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
     795 CBC      236685 :     dbuckets = Min(dbuckets, max_pointers);
     796 GIC      236685 :     nbuckets = (int) dbuckets;
     797                 :     /* don't let nbuckets be really small, though ... */
     798          236685 :     nbuckets = Max(nbuckets, 1024);
     799                 :     /* ... and force it to be a power of 2. */
     800          236685 :     nbuckets = pg_nextpower2_32(nbuckets);
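
    /*
     * Why force a power of 2: ExecHashGetBucketAndBatch (later in this file)
     * can then reduce a hash value with masks instead of division.  Roughly:
     *
     *    bucketno = hashvalue & (nbuckets - 1);
     *    batchno  = (hashvalue >> log2_nbuckets) & (nbatch - 1);
     *
     * (The real function rotates rather than shifts, but masking is the
     * point here.)
     */
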
     801 ECB             : 
     802                 :     /*
     803                 :      * If there's not enough space to store the projected number of tuples and
     804                 :      * the required bucket headers, we will need multiple batches.
     805                 :      */
     806 GIC      236685 :     bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
     807          236685 :     if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
     808                 :     {
     809                 :         /* We'll need multiple batches */
     810                 :         size_t      sbuckets;
     811                 :         double      dbatch;
     812                 :         int         minbatch;
     813                 :         size_t      bucket_size;
     814 ECB             : 
     815                 :         /*
     816                 :          * If Parallel Hash with combined hash_mem would still need multiple
     817                 :          * batches, we'll have to fall back to regular hash_mem budget.
     818                 :          */
     819 GIC        2407 :         if (try_combined_hash_mem)
     820                 :         {
     821             123 :             ExecChooseHashTableSize(ntuples, tupwidth, useskew,
     822 ECB             :                                     false, parallel_workers,
     823                 :                                     space_allowed,
     824                 :                                     numbuckets,
     825                 :                                     numbatches,
     826                 :                                     num_skew_mcvs);
     827 GIC         123 :             return;
     828                 :         }
     829                 : 
     830                 :         /*
     831 ECB             :          * Estimate the number of buckets we'll want to have when hash_mem is
     832                 :          * entirely full.  Each bucket will contain a bucket pointer plus
     833 EUB             :          * NTUP_PER_BUCKET tuples, whose projected size already includes
     834                 :          * overhead for the hash code, pointer to the next tuple, etc.
     835 ECB             :          */
     836 CBC        2284 :         bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
     837            2284 :         if (hash_table_bytes <= bucket_size)
     838 LBC           0 :             sbuckets = 1;       /* avoid pg_nextpower2_size_t(0) */
     839 ECB             :         else
     840 GIC        2284 :             sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
     841            2284 :         sbuckets = Min(sbuckets, max_pointers);
     842            2284 :         nbuckets = (int) sbuckets;
     843            2284 :         nbuckets = pg_nextpower2_32(nbuckets);
     844            2284 :         bucket_bytes = nbuckets * sizeof(HashJoinTuple);
     845                 : 
     846                 :         /*
     847                 :          * Buckets are simple pointers to hashjoin tuples, while tupsize
     848                 :          * includes the pointer, hash code, and MinimalTupleData.  So buckets
     849 ECB             :          * should never really exceed 25% of hash_mem (even for
     850                 :          * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
     851                 :          * 2^N bytes, where we might get more because of doubling. So let's
     852                 :          * look for 50% here.
     853                 :          */
     854 CBC        2284 :         Assert(bucket_bytes <= hash_table_bytes / 2);
     855 ECB             : 
     856                 :         /* Calculate required number of batches. */
     857 GIC        2284 :         dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
     858 CBC        2284 :         dbatch = Min(dbatch, max_pointers);
     859            2284 :         minbatch = (int) dbatch;
     860 GIC        2284 :         nbatch = pg_nextpower2_32(Max(2, minbatch));
     861 ECB             :     }
     862                 : 
     863 GIC      236562 :     Assert(nbuckets > 0);
     864          236562 :     Assert(nbatch > 0);
     865                 : 
     866          236562 :     *numbuckets = nbuckets;
     867          236562 :     *numbatches = nbatch;
     868                 : }
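
The sizing policy above is easiest to follow with concrete numbers.  Below is
a self-contained sketch (not PostgreSQL code) that reproduces the core math,
with skew, parallelism, and the clamping steps stripped out; the input values
and the two 16-byte overhead constants are assumptions for illustration:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAXALIGN(x)     (((uintptr_t) (x) + 7) & ~(uintptr_t) 7)
    #define NTUP_PER_BUCKET 1

    /* stand-in for pg_nextpower2_32 */
    static uint32_t
    next_pow2(uint32_t v)
    {
        uint32_t    r = 1;

        while (r < v)
            r <<= 1;
        return r;
    }

    int
    main(void)
    {
        double      ntuples = 1000000.0;        /* assumed inner-relation rows */
        int         tupwidth = 40;              /* assumed average row width */
        size_t      hash_table_bytes = 8388608; /* assumed hash_mem: 8 MB */
        size_t      tupsize;
        double      inner_rel_bytes;
        uint32_t    nbuckets;
        int         nbatch = 1;

        /* 16 + 16 approximate HJTUPLE_OVERHEAD and the minimal tuple header */
        tupsize = 16 + 16 + MAXALIGN(tupwidth); /* 72 bytes */
        inner_rel_bytes = ntuples * tupsize;    /* 72 MB: will not fit */

        /* single-batch hope: one bucket per tuple, rounded to a power of 2 */
        nbuckets = next_pow2((uint32_t) ceil(ntuples / NTUP_PER_BUCKET));

        if (inner_rel_bytes + nbuckets * sizeof(void *) > hash_table_bytes)
        {
            /* multiple batches: size buckets for a full hash_mem instead */
            size_t      bucket_size = tupsize * NTUP_PER_BUCKET + sizeof(void *);
            size_t      bucket_bytes;

            nbuckets = next_pow2((uint32_t) (hash_table_bytes / bucket_size));
            bucket_bytes = nbuckets * sizeof(void *);
            nbatch = (int) next_pow2((uint32_t)
                        ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes)));
        }

        /* prints: nbuckets = 131072, nbatch = 16 */
        printf("nbuckets = %u, nbatch = %d\n", nbuckets, nbatch);
        return 0;
    }
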
     869                 : 
     870                 : 
     871                 : /* ----------------------------------------------------------------
     872                 :  *      ExecHashTableDestroy
     873 ECB             :  *
     874                 :  *      destroy a hash table
     875                 :  * ----------------------------------------------------------------
     876                 :  */
     877                 : void
     878 GIC        9844 : ExecHashTableDestroy(HashJoinTable hashtable)
     879                 : {
     880                 :     int         i;
     881                 : 
     882 ECB             :     /*
     883                 :      * Make sure all the temp files are closed.  We skip batch 0, since it
     884                 :      * can't have any temp files (and the arrays might not even exist if
     885                 :      * nbatch is only 1).  Parallel hash joins don't use these files.
     886                 :      */
     887 GBC        9844 :     if (hashtable->innerBatchFile != NULL)
     888 ECB             :     {
     889 GBC         856 :         for (i = 1; i < hashtable->nbatch; i++)
     890                 :         {
     891 GIC         752 :             if (hashtable->innerBatchFile[i])
     892 UIC           0 :                 BufFileClose(hashtable->innerBatchFile[i]);
     893 GIC         752 :             if (hashtable->outerBatchFile[i])
     894 LBC           0 :                 BufFileClose(hashtable->outerBatchFile[i]);
     895                 :         }
     896                 :     }
     897 ECB             : 
     898                 :     /* Release working memory (batchCxt is a child, so it goes away too) */
     899 GIC        9844 :     MemoryContextDelete(hashtable->hashCxt);
     900                 : 
     901                 :     /* And drop the control block */
     902            9844 :     pfree(hashtable);
     903            9844 : }
     904                 : 
     905                 : /*
     906 ECB             :  * ExecHashIncreaseNumBatches
     907                 :  *      increase the original number of batches in order to reduce
     908                 :  *      current memory consumption
     909                 :  */
     910                 : static void
     911 GIC      380031 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
     912                 : {
     913          380031 :     int         oldnbatch = hashtable->nbatch;
     914          380031 :     int         curbatch = hashtable->curbatch;
     915                 :     int         nbatch;
     916                 :     MemoryContext oldcxt;
     917 ECB             :     long        ninmemory;
     918                 :     long        nfreed;
     919                 :     HashMemoryChunk oldchunks;
     920                 : 
     921                 :     /* do nothing if we've decided to shut off growth */
     922 GBC      380031 :     if (!hashtable->growEnabled)
     923 GIC      379940 :         return;
     924 ECB             : 
     925                 :     /* safety check to avoid overflow */
     926 GIC          91 :     if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
     927 UIC           0 :         return;
     928                 : 
     929 GIC          91 :     nbatch = oldnbatch * 2;
     930              91 :     Assert(nbatch > 1);
     931                 : 
     932 ECB             : #ifdef HJDEBUG
     933                 :     printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
     934                 :            hashtable, nbatch, hashtable->spaceUsed);
     935                 : #endif
     936                 : 
     937 CBC          91 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
     938 ECB             : 
     939 GIC          91 :     if (hashtable->innerBatchFile == NULL)
     940 ECB             :     {
     941                 :         /* we had no file arrays before */
     942 GNC          44 :         hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
     943              44 :         hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
     944 ECB             :         /* time to establish the temp tablespaces, too */
     945 GIC          44 :         PrepareTempTablespaces();
     946                 :     }
     947 ECB             :     else
     948                 :     {
     949                 :         /* enlarge arrays and zero out added entries */
     950 GNC          47 :         hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
     951              47 :         hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
     952 ECB             :     }
     953                 : 
     954 GIC          91 :     MemoryContextSwitchTo(oldcxt);
     955 ECB             : 
     956 GIC          91 :     hashtable->nbatch = nbatch;
     957 ECB             : 
     958                 :     /*
     959                 :      * Scan through the existing hash table entries and dump out any that are
      960                 :      * no longer part of the current batch.
     961                 :      */
     962 GIC          91 :     ninmemory = nfreed = 0;
     963                 : 
      964                 :     /* If we know we need to resize nbuckets, we can do it while rebatching. */
     965              91 :     if (hashtable->nbuckets_optimal != hashtable->nbuckets)
     966                 :     {
     967                 :         /* we never decrease the number of buckets */
     968              44 :         Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
     969                 : 
     970 CBC          44 :         hashtable->nbuckets = hashtable->nbuckets_optimal;
     971              44 :         hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
     972 ECB             : 
     973 CBC          44 :         hashtable->buckets.unshared =
     974 GNC          44 :             repalloc_array(hashtable->buckets.unshared,
     975                 :                            HashJoinTuple, hashtable->nbuckets);
     976 ECB             :     }
     977                 : 
     978                 :     /*
     979                 :      * We will scan through the chunks directly, so that we can reset the
      980                 :      * buckets now and not have to keep track of which tuples in the buckets have
     981                 :      * already been processed. We will free the old chunks as we go.
     982                 :      */
     983 GIC          91 :     memset(hashtable->buckets.unshared, 0,
     984 CBC          91 :            sizeof(HashJoinTuple) * hashtable->nbuckets);
     985 GIC          91 :     oldchunks = hashtable->chunks;
     986 CBC          91 :     hashtable->chunks = NULL;
     987 ECB             : 
     988                 :     /* so, let's scan through the old chunks, and all tuples in each chunk */
     989 GIC         455 :     while (oldchunks != NULL)
     990                 :     {
     991             364 :         HashMemoryChunk nextchunk = oldchunks->next.unshared;
     992 ECB             : 
     993                 :         /* position within the buffer (up to oldchunks->used) */
     994 GIC         364 :         size_t      idx = 0;
     995                 : 
     996 ECB             :         /* process all tuples stored in this chunk (and then free it) */
     997 GIC      248669 :         while (idx < oldchunks->used)
     998                 :         {
     999          248305 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
    1000          248305 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1001 CBC      248305 :             int         hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
    1002 ECB             :             int         bucketno;
    1003                 :             int         batchno;
    1004                 : 
    1005 CBC      248305 :             ninmemory++;
    1006          248305 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1007                 :                                       &bucketno, &batchno);
    1008                 : 
    1009 GIC      248305 :             if (batchno == curbatch)
    1010                 :             {
    1011 ECB             :                 /* keep tuple in memory - copy it into the new chunk */
    1012                 :                 HashJoinTuple copyTuple;
    1013                 : 
    1014 CBC       93325 :                 copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1015 GIC       93325 :                 memcpy(copyTuple, hashTuple, hashTupleSize);
    1016 ECB             : 
    1017                 :                 /* and add it back to the appropriate bucket */
    1018 GIC       93325 :                 copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1019           93325 :                 hashtable->buckets.unshared[bucketno] = copyTuple;
    1020                 :             }
    1021 ECB             :             else
    1022                 :             {
    1023                 :                 /* dump it out */
    1024 CBC      154980 :                 Assert(batchno > curbatch);
    1025 GIC      154980 :                 ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
    1026                 :                                       hashTuple->hashvalue,
    1027          154980 :                                       &hashtable->innerBatchFile[batchno]);
    1028 ECB             : 
    1029 CBC      154980 :                 hashtable->spaceUsed -= hashTupleSize;
    1030 GIC      154980 :                 nfreed++;
    1031                 :             }
    1032                 : 
    1033                 :             /* next tuple in this chunk */
    1034          248305 :             idx += MAXALIGN(hashTupleSize);
    1035                 : 
    1036                 :             /* allow this loop to be cancellable */
    1037          248305 :             CHECK_FOR_INTERRUPTS();
    1038                 :         }
    1039                 : 
    1040                 :         /* we're done with this chunk - free it and proceed to the next one */
    1041             364 :         pfree(oldchunks);
    1042             364 :         oldchunks = nextchunk;
    1043                 :     }
    1044                 : 
    1045 ECB             : #ifdef HJDEBUG
    1046                 :     printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
    1047                 :            hashtable, nfreed, ninmemory, hashtable->spaceUsed);
    1048                 : #endif
    1049                 : 
    1050                 :     /*
    1051                 :      * If we dumped out either all or none of the tuples in the table, disable
    1052                 :      * further expansion of nbatch.  This situation implies that we have
    1053                 :      * enough tuples of identical hashvalues to overflow spaceAllowed.
    1054                 :      * Increasing nbatch will not fix it since there's no way to subdivide the
    1055                 :      * group any more finely. We have to just gut it out and hope the server
    1056                 :      * has enough RAM.
    1057                 :      */
    1058 GIC          91 :     if (nfreed == 0 || nfreed == ninmemory)
    1059                 :     {
    1060              22 :         hashtable->growEnabled = false;
    1061 ECB             : #ifdef HJDEBUG
    1062                 :         printf("Hashjoin %p: disabling further increase of nbatch\n",
    1063                 :                hashtable);
    1064                 : #endif
    1065                 :     }
    1066                 : }
    1067                 : 
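                          : /*
                          :  * Why the loop above may Assert(batchno > curbatch): a sketch of the
                          :  * bucket/batch mapping with illustrative names.  The real mapping is
                          :  * ExecHashGetBucketAndBatch(), which rotates the hash value instead
                          :  * of shifting it, but the invariant is the same: bucketno comes from
                          :  * the low log2_nbuckets bits and batchno from the bits above them,
                          :  * so (with nbuckets held constant) doubling nbatch merely exposes
                          :  * one more high bit.  A tuple's new batchno is thus either its old
                          :  * batchno or its old batchno plus the old nbatch, never smaller.
                          :  */
                          : #include <stdint.h>
                          : 
                          : static void
                          : sketch_bucket_and_batch(uint32_t hashvalue,
                          :                         int log2_nbuckets, int nbuckets, int nbatch,
                          :                         int *bucketno, int *batchno)
                          : {
                          :     *bucketno = (int) (hashvalue & (uint32_t) (nbuckets - 1));
                          :     if (nbatch > 1)
                          :         *batchno = (int) ((hashvalue >> log2_nbuckets) &
                          :                           (uint32_t) (nbatch - 1));
                          :     else
                          :         *batchno = 0;
                          : }
                          : 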
    1068                 : /*
    1069                 :  * ExecParallelHashIncreaseNumBatches
    1070                 :  *      Every participant attached to grow_batches_barrier must run this
    1071                 :  *      function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
    1072                 :  */
    1073                 : static void
    1074 CBC          30 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
    1075                 : {
    1076 GIC          30 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1077                 : 
    1078 GNC          30 :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1079                 : 
    1080                 :     /*
     1081 ECB             :      * It's unlikely, but new participants can show up while we're in the
     1082                 :      * middle of this operation, so we switch on the barrier phase here; a
     1083                 :      * late attacher simply skips the phases that have already completed.
    1084                 :      */
    1085 GIC          30 :     switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
    1086                 :     {
    1087 GNC          29 :         case PHJ_GROW_BATCHES_ELECT:
    1088                 : 
    1089                 :             /*
    1090 ECB             :              * Elect one participant to prepare to grow the number of batches.
    1091                 :              * This involves reallocating or resetting the buckets of batch 0
    1092                 :              * in preparation for all participants to begin repartitioning the
    1093                 :              * tuples.
    1094                 :              */
    1095 GIC          29 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1096 ECB             :                                      WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
    1097                 :             {
    1098                 :                 dsa_pointer_atomic *buckets;
    1099                 :                 ParallelHashJoinBatch *old_batch0;
    1100                 :                 int         new_nbatch;
    1101                 :                 int         i;
    1102                 : 
    1103                 :                 /* Move the old batch out of the way. */
    1104 GIC          28 :                 old_batch0 = hashtable->batches[0].shared;
    1105              28 :                 pstate->old_batches = pstate->batches;
    1106 CBC          28 :                 pstate->old_nbatch = hashtable->nbatch;
    1107 GIC          28 :                 pstate->batches = InvalidDsaPointer;
    1108                 : 
    1109                 :                 /* Free this backend's old accessors. */
    1110              28 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1111                 : 
    1112                 :                 /* Figure out how many batches to use. */
    1113              28 :                 if (hashtable->nbatch == 1)
    1114                 :                 {
    1115 ECB             :                     /*
    1116                 :                      * We are going from single-batch to multi-batch.  We need
    1117                 :                      * to switch from one large combined memory budget to the
    1118                 :                      * regular hash_mem budget.
    1119                 :                      */
    1120 GIC          18 :                     pstate->space_allowed = get_hash_memory_limit();
    1121                 : 
    1122                 :                     /*
    1123 ECB             :                      * The combined hash_mem of all participants wasn't
    1124                 :                      * enough. Therefore one batch per participant would be
    1125                 :                      * approximately equivalent and would probably also be
    1126                 :                      * insufficient.  So try two batches per participant,
    1127                 :                      * rounded up to a power of two.
    1128                 :                      */
    1129 CBC          18 :                     new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
    1130                 :                 }
    1131                 :                 else
    1132 ECB             :                 {
    1133                 :                     /*
    1134                 :                      * We were already multi-batched.  Try doubling the number
    1135                 :                      * of batches.
    1136                 :                      */
    1137 GIC          10 :                     new_nbatch = hashtable->nbatch * 2;
    1138                 :                 }
    1139                 : 
    1140                 :                 /* Allocate new larger generation of batches. */
    1141              28 :                 Assert(hashtable->nbatch == pstate->nbatch);
    1142              28 :                 ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
    1143              28 :                 Assert(hashtable->nbatch == pstate->nbatch);
    1144                 : 
    1145                 :                 /* Replace or recycle batch 0's bucket array. */
    1146              28 :                 if (pstate->old_nbatch == 1)
    1147                 :                 {
    1148 ECB             :                     double      dtuples;
    1149                 :                     double      dbuckets;
    1150                 :                     int         new_nbuckets;
    1151                 : 
    1152                 :                     /*
    1153                 :                      * We probably also need a smaller bucket array.  How many
    1154                 :                      * tuples do we expect per batch, assuming we have only
    1155                 :                      * half of them so far?  Normally we don't need to change
    1156                 :                      * the bucket array's size, because the size of each batch
    1157                 :                      * stays the same as we add more batches, but in this
    1158                 :                      * special case we move from a large batch to many smaller
    1159                 :                      * batches and it would be wasteful to keep the large
    1160                 :                      * array.
    1161                 :                      */
    1162 CBC          18 :                     dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
    1163              18 :                     dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
    1164              18 :                     dbuckets = Min(dbuckets,
    1165                 :                                    MaxAllocSize / sizeof(dsa_pointer_atomic));
    1166 GIC          18 :                     new_nbuckets = (int) dbuckets;
    1167              18 :                     new_nbuckets = Max(new_nbuckets, 1024);
    1168              18 :                     new_nbuckets = pg_nextpower2_32(new_nbuckets);
    1169 CBC          18 :                     dsa_free(hashtable->area, old_batch0->buckets);
    1170 GIC          36 :                     hashtable->batches[0].shared->buckets =
    1171 CBC          18 :                         dsa_allocate(hashtable->area,
    1172 ECB             :                                      sizeof(dsa_pointer_atomic) * new_nbuckets);
    1173                 :                     buckets = (dsa_pointer_atomic *)
    1174 GIC          18 :                         dsa_get_address(hashtable->area,
    1175              18 :                                         hashtable->batches[0].shared->buckets);
    1176           55314 :                     for (i = 0; i < new_nbuckets; ++i)
    1177 CBC       55296 :                         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1178 GIC          18 :                     pstate->nbuckets = new_nbuckets;
    1179                 :                 }
    1180 ECB             :                 else
    1181                 :                 {
    1182                 :                     /* Recycle the existing bucket array. */
    1183 GIC          10 :                     hashtable->batches[0].shared->buckets = old_batch0->buckets;
    1184                 :                     buckets = (dsa_pointer_atomic *)
    1185 CBC          10 :                         dsa_get_address(hashtable->area, old_batch0->buckets);
    1186 GIC       30730 :                     for (i = 0; i < hashtable->nbuckets; ++i)
    1187           30720 :                         dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
    1188                 :                 }
    1189                 : 
    1190                 :                 /* Move all chunks to the work queue for parallel processing. */
    1191 CBC          28 :                 pstate->chunk_work_queue = old_batch0->chunks;
    1192                 : 
    1193                 :                 /* Disable further growth temporarily while we're growing. */
    1194 GIC          28 :                 pstate->growth = PHJ_GROWTH_DISABLED;
    1195 ECB             :             }
    1196                 :             else
    1197                 :             {
    1198                 :                 /* All other participants just flush their tuples to disk. */
    1199 GIC           1 :                 ExecParallelHashCloseBatchAccessors(hashtable);
    1200 ECB             :             }
    1201                 :             /* Fall through. */
    1202                 : 
    1203                 :         case PHJ_GROW_BATCHES_REALLOCATE:
    1204                 :             /* Wait for the above to be finished. */
    1205 GIC          29 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1206                 :                                  WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
    1207                 :             /* Fall through. */
    1208 ECB             : 
    1209 GNC          30 :         case PHJ_GROW_BATCHES_REPARTITION:
    1210                 :             /* Make sure that we have the current dimensions and buckets. */
    1211 GIC          30 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1212              30 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1213                 :             /* Then partition, flush counters. */
    1214              30 :             ExecParallelHashRepartitionFirst(hashtable);
    1215 CBC          30 :             ExecParallelHashRepartitionRest(hashtable);
    1216 GIC          30 :             ExecParallelHashMergeCounters(hashtable);
    1217                 :             /* Wait for the above to be finished. */
    1218 CBC          30 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1219 ECB             :                                  WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
    1220                 :             /* Fall through. */
    1221                 : 
    1222 GNC          30 :         case PHJ_GROW_BATCHES_DECIDE:
    1223 ECB             : 
    1224                 :             /*
    1225                 :              * Elect one participant to clean up and decide whether further
    1226                 :              * repartitioning is needed, or should be disabled because it's
    1227                 :              * not helping.
    1228                 :              */
    1229 GIC          30 :             if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1230 ECB             :                                      WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
    1231                 :             {
    1232 GIC          28 :                 bool        space_exhausted = false;
    1233              28 :                 bool        extreme_skew_detected = false;
    1234                 : 
    1235 ECB             :                 /* Make sure that we have the current dimensions and buckets. */
    1236 GIC          28 :                 ExecParallelHashEnsureBatchAccessors(hashtable);
    1237              28 :                 ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1238                 : 
    1239                 :                 /* Are any of the new generation of batches exhausted? */
    1240 GNC         244 :                 for (int i = 0; i < hashtable->nbatch; ++i)
    1241                 :                 {
    1242 GIC         216 :                     ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
    1243 ECB             : 
    1244 CBC         216 :                     if (batch->space_exhausted ||
    1245             216 :                         batch->estimated_size > pstate->space_allowed)
    1246                 :                     {
    1247                 :                         int         parent;
    1248                 : 
    1249 GIC          12 :                         space_exhausted = true;
    1250 ECB             : 
    1251                 :                         /*
    1252                 :                          * Did this batch receive ALL of the tuples from its
    1253 EUB             :                          * parent batch?  That would indicate that further
    1254                 :                          * repartitioning isn't going to help (the hash values
    1255 ECB             :                          * are probably all the same).
    1256                 :                          */
    1257 GIC          12 :                         parent = i % pstate->old_nbatch;
    1258 CBC          12 :                         if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
    1259              12 :                             extreme_skew_detected = true;
    1260                 :                     }
    1261                 :                 }
    1262                 : 
    1263                 :                 /* Don't keep growing if it's not helping or we'd overflow. */
    1264 GIC          28 :                 if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
    1265 CBC          12 :                     pstate->growth = PHJ_GROWTH_DISABLED;
    1266 GIC          16 :                 else if (space_exhausted)
    1267 UIC           0 :                     pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    1268 ECB             :                 else
    1269 GIC          16 :                     pstate->growth = PHJ_GROWTH_OK;
    1270                 : 
    1271                 :                 /* Free the old batches in shared memory. */
    1272              28 :                 dsa_free(hashtable->area, pstate->old_batches);
    1273              28 :                 pstate->old_batches = InvalidDsaPointer;
    1274                 :             }
    1275                 :             /* Fall through. */
    1276 ECB             : 
    1277                 :         case PHJ_GROW_BATCHES_FINISH:
    1278                 :             /* Wait for the above to complete. */
    1279 GIC          30 :             BarrierArriveAndWait(&pstate->grow_batches_barrier,
    1280                 :                                  WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
    1281 ECB             :     }
    1282 GIC          30 : }
    1283 ECB             : 
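                          : /*
                          :  * The elect-then-work pattern above has a close POSIX analogue,
                          :  * shown here as a self-contained sketch (hypothetical names, not
                          :  * PostgreSQL code): pthread_barrier_wait() returns
                          :  * PTHREAD_BARRIER_SERIAL_THREAD in exactly one thread, just as
                          :  * BarrierArriveAndWait() returns true in exactly one backend, so
                          :  * that thread can do the serial setup while the others wait.
                          :  */
                          : #include <pthread.h>
                          : #include <stdio.h>
                          : 
                          : #define NWORKERS 4
                          : 
                          : static pthread_barrier_t phase_barrier;
                          : 
                          : static void *
                          : worker(void *arg)
                          : {
                          :     long        id = (long) arg;
                          : 
                          :     /* Exactly one thread is "elected" for the serial setup step. */
                          :     if (pthread_barrier_wait(&phase_barrier) ==
                          :         PTHREAD_BARRIER_SERIAL_THREAD)
                          :         printf("worker %ld: elected, doing serial setup\n", id);
                          : 
                          :     /* A second wait ensures setup is finished before parallel work. */
                          :     pthread_barrier_wait(&phase_barrier);
                          :     printf("worker %ld: doing parallel work\n", id);
                          :     return NULL;
                          : }
                          : 
                          : int
                          : main(void)
                          : {
                          :     pthread_t   threads[NWORKERS];
                          : 
                          :     pthread_barrier_init(&phase_barrier, NULL, NWORKERS);
                          :     for (long i = 0; i < NWORKERS; i++)
                          :         pthread_create(&threads[i], NULL, worker, (void *) i);
                          :     for (int i = 0; i < NWORKERS; i++)
                          :         pthread_join(threads[i], NULL);
                          :     pthread_barrier_destroy(&phase_barrier);
                          :     return 0;
                          : }
                          : 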
    1284                 : /*
    1285                 :  * Repartition the tuples currently loaded into memory for inner batch 0
    1286                 :  * because the number of batches has been increased.  Some tuples are retained
    1287                 :  * in memory and some are written out to a later batch.
    1288                 :  */
    1289                 : static void
    1290 CBC          30 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
    1291 ECB             : {
    1292                 :     dsa_pointer chunk_shared;
    1293                 :     HashMemoryChunk chunk;
    1294                 : 
    1295 GIC          30 :     Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
    1296                 : 
    1297 CBC         208 :     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
    1298                 :     {
    1299 GIC         148 :         size_t      idx = 0;
    1300 ECB             : 
    1301                 :         /* Repartition all tuples in this chunk. */
    1302 GIC      110998 :         while (idx < chunk->used)
    1303                 :         {
    1304          110850 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1305 CBC      110850 :             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
    1306 ECB             :             HashJoinTuple copyTuple;
    1307                 :             dsa_pointer shared;
    1308                 :             int         bucketno;
    1309                 :             int         batchno;
    1310                 : 
    1311 GIC      110850 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1312                 :                                       &bucketno, &batchno);
    1313                 : 
    1314          110850 :             Assert(batchno < hashtable->nbatch);
    1315 CBC      110850 :             if (batchno == 0)
    1316 ECB             :             {
    1317                 :                 /* It still belongs in batch 0.  Copy to a new chunk. */
    1318                 :                 copyTuple =
    1319 CBC       25535 :                     ExecParallelHashTupleAlloc(hashtable,
    1320           25535 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1321 ECB             :                                                &shared);
    1322 GIC       25535 :                 copyTuple->hashvalue = hashTuple->hashvalue;
    1323           25535 :                 memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
    1324           25535 :                 ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1325 ECB             :                                           copyTuple, shared);
    1326                 :             }
    1327                 :             else
    1328                 :             {
    1329 GIC       85315 :                 size_t      tuple_size =
    1330           85315 :                 MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1331                 : 
    1332                 :                 /* It belongs in a later batch. */
    1333 CBC       85315 :                 hashtable->batches[batchno].estimated_size += tuple_size;
    1334 GIC       85315 :                 sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1335 CBC       85315 :                              &hashTuple->hashvalue, tuple);
    1336                 :             }
    1337 ECB             : 
    1338                 :             /* Count this tuple. */
    1339 GIC      110850 :             ++hashtable->batches[0].old_ntuples;
    1340          110850 :             ++hashtable->batches[batchno].ntuples;
    1341                 : 
    1342          110850 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1343 ECB             :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1344                 :         }
    1345                 : 
    1346                 :         /* Free this chunk. */
    1347 GIC         148 :         dsa_free(hashtable->area, chunk_shared);
    1348                 : 
    1349             148 :         CHECK_FOR_INTERRUPTS();
    1350                 :     }
    1351              30 : }
    1352                 : 
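                          : /*
                          :  * The chunk work queue driving the loop above, reduced to a hedged
                          :  * pthreads sketch with hypothetical names (the real queue lives in
                          :  * DSA memory and is popped via ExecParallelHashPopChunkQueue()):
                          :  * each participant repeatedly pops one chunk under a brief lock, so
                          :  * the repartitioning work spreads itself across however many
                          :  * workers happen to be attached.
                          :  */
                          : #include <pthread.h>
                          : #include <stddef.h>
                          : 
                          : struct chunk
                          : {
                          :     struct chunk *next;
                          :     /* ...densely packed tuples would follow... */
                          : };
                          : 
                          : static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
                          : static struct chunk *work_queue;
                          : 
                          : static struct chunk *
                          : pop_chunk(void)
                          : {
                          :     struct chunk *c;
                          : 
                          :     pthread_mutex_lock(&queue_lock);
                          :     c = work_queue;
                          :     if (c != NULL)
                          :         work_queue = c->next;
                          :     pthread_mutex_unlock(&queue_lock);
                          :     return c;                   /* NULL means the queue is drained */
                          : }
                          : 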
    1353 ECB             : /*
    1354                 :  * Help repartition inner batches 1..n.
    1355                 :  */
    1356                 : static void
    1357 CBC          30 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
    1358 ECB             : {
    1359 GIC          30 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1360 CBC          30 :     int         old_nbatch = pstate->old_nbatch;
    1361                 :     SharedTuplestoreAccessor **old_inner_tuples;
    1362                 :     ParallelHashJoinBatch *old_batches;
    1363                 :     int         i;
    1364                 : 
    1365                 :     /* Get our hands on the previous generation of batches. */
    1366 ECB             :     old_batches = (ParallelHashJoinBatch *)
    1367 GIC          30 :         dsa_get_address(hashtable->area, pstate->old_batches);
    1368 GNC          30 :     old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
    1369 GIC          83 :     for (i = 1; i < old_nbatch; ++i)
    1370                 :     {
    1371              53 :         ParallelHashJoinBatch *shared =
    1372 CBC          53 :         NthParallelHashJoinBatch(old_batches, i);
    1373 ECB             : 
    1374 GIC          53 :         old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
    1375 ECB             :                                          ParallelWorkerNumber + 1,
    1376                 :                                          &pstate->fileset);
    1377                 :     }
    1378                 : 
    1379                 :     /* Join in the effort to repartition them. */
    1380 CBC          83 :     for (i = 1; i < old_nbatch; ++i)
    1381                 :     {
    1382                 :         MinimalTuple tuple;
    1383 ECB             :         uint32      hashvalue;
    1384                 : 
    1385                 :         /* Scan one partition from the previous generation. */
    1386 GIC          53 :         sts_begin_parallel_scan(old_inner_tuples[i]);
    1387          108653 :         while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
    1388 ECB             :         {
    1389 GIC      108600 :             size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1390                 :             int         bucketno;
    1391 ECB             :             int         batchno;
    1392                 : 
    1393                 :             /* Decide which partition it goes to in the new generation. */
    1394 GIC      108600 :             ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
    1395                 :                                       &batchno);
    1396 ECB             : 
    1397 CBC      108600 :             hashtable->batches[batchno].estimated_size += tuple_size;
    1398 GIC      108600 :             ++hashtable->batches[batchno].ntuples;
    1399          108600 :             ++hashtable->batches[i].old_ntuples;
    1400                 : 
     1401                 :             /* Store the tuple in its new batch. */
    1402          108600 :             sts_puttuple(hashtable->batches[batchno].inner_tuples,
    1403 ECB             :                          &hashvalue, tuple);
    1404                 : 
    1405 CBC      108600 :             CHECK_FOR_INTERRUPTS();
    1406                 :         }
    1407 GIC          53 :         sts_end_parallel_scan(old_inner_tuples[i]);
    1408 ECB             :     }
    1409                 : 
    1410 CBC          30 :     pfree(old_inner_tuples);
    1411 GIC          30 : }
    1412 ECB             : 
    1413                 : /*
    1414                 :  * Transfer the backend-local per-batch counters to the shared totals.
    1415                 :  */
    1416                 : static void
    1417 CBC         166 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
    1418 ECB             : {
    1419 CBC         166 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1420 ECB             :     int         i;
    1421                 : 
    1422 CBC         166 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    1423 GIC         166 :     pstate->total_tuples = 0;
    1424 CBC        1037 :     for (i = 0; i < hashtable->nbatch; ++i)
    1425 ECB             :     {
    1426 GIC         871 :         ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
    1427                 : 
    1428             871 :         batch->shared->size += batch->size;
    1429             871 :         batch->shared->estimated_size += batch->estimated_size;
    1430             871 :         batch->shared->ntuples += batch->ntuples;
    1431             871 :         batch->shared->old_ntuples += batch->old_ntuples;
    1432             871 :         batch->size = 0;
    1433 CBC         871 :         batch->estimated_size = 0;
    1434 GIC         871 :         batch->ntuples = 0;
    1435             871 :         batch->old_ntuples = 0;
    1436             871 :         pstate->total_tuples += batch->shared->ntuples;
    1437                 :     }
    1438 CBC         166 :     LWLockRelease(&pstate->lock);
    1439 GBC         166 : }
    1440                 : 
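                          : /*
                          :  * The accumulate-locally-then-fold idiom above, as a minimal
                          :  * pthreads sketch with hypothetical names: per-worker deltas are
                          :  * maintained without locking and folded into the shared totals
                          :  * under one brief lock, then zeroed so that a later merge cannot
                          :  * double-count them.
                          :  */
                          : #include <pthread.h>
                          : 
                          : static pthread_mutex_t totals_lock = PTHREAD_MUTEX_INITIALIZER;
                          : static long shared_ntuples;
                          : 
                          : static void
                          : merge_counter(long *local_ntuples)
                          : {
                          :     pthread_mutex_lock(&totals_lock);
                          :     shared_ntuples += *local_ntuples;
                          :     *local_ntuples = 0;         /* the delta is now merged */
                          :     pthread_mutex_unlock(&totals_lock);
                          : }
                          : 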
    1441                 : /*
    1442                 :  * ExecHashIncreaseNumBuckets
    1443                 :  *      increase the original number of buckets in order to reduce
     1444                 :  *      the number of tuples per bucket
    1445                 :  */
    1446 ECB             : static void
    1447 CBC          59 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
    1448                 : {
    1449 ECB             :     HashMemoryChunk chunk;
    1450                 : 
    1451                 :     /* do nothing if not an increase (it's called increase for a reason) */
    1452 GIC          59 :     if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
    1453 UIC           0 :         return;
    1454                 : 
    1455                 : #ifdef HJDEBUG
    1456                 :     printf("Hashjoin %p: increasing nbuckets %d => %d\n",
    1457                 :            hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
    1458                 : #endif
    1459 ECB             : 
    1460 CBC          59 :     hashtable->nbuckets = hashtable->nbuckets_optimal;
    1461 GIC          59 :     hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
    1462                 : 
    1463 CBC          59 :     Assert(hashtable->nbuckets > 1);
    1464              59 :     Assert(hashtable->nbuckets <= (INT_MAX / 2));
    1465 GIC          59 :     Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
    1466                 : 
    1467 ECB             :     /*
    1468                 :      * Just reallocate the proper number of buckets - we don't need to walk
    1469                 :      * through them - we can walk the dense-allocated chunks (just like in
    1470                 :      * ExecHashIncreaseNumBatches, but without all the copying into new
    1471                 :      * chunks)
    1472                 :      */
    1473 GIC          59 :     hashtable->buckets.unshared =
    1474 GNC          59 :         repalloc_array(hashtable->buckets.unshared,
    1475                 :                        HashJoinTuple, hashtable->nbuckets);
    1476                 : 
    1477 GIC          59 :     memset(hashtable->buckets.unshared, 0,
    1478 CBC          59 :            hashtable->nbuckets * sizeof(HashJoinTuple));
    1479                 : 
    1480                 :     /* scan through all tuples in all chunks to rebuild the hash table */
    1481 GIC         622 :     for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
    1482 ECB             :     {
    1483                 :         /* process all tuples stored in this chunk */
    1484 GIC         563 :         size_t      idx = 0;
    1485                 : 
    1486 CBC      387856 :         while (idx < chunk->used)
    1487                 :         {
    1488 GIC      387293 :             HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1489                 :             int         bucketno;
    1490                 :             int         batchno;
    1491 ECB             : 
    1492 GIC      387293 :             ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1493                 :                                       &bucketno, &batchno);
    1494                 : 
    1495                 :             /* add the tuple to the proper bucket */
    1496 CBC      387293 :             hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1497 GIC      387293 :             hashtable->buckets.unshared[bucketno] = hashTuple;
    1498 ECB             : 
    1499                 :             /* advance index past the tuple */
    1500 GIC      387293 :             idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1501                 :                             HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1502                 :         }
    1503 ECB             : 
    1504                 :         /* allow this loop to be cancellable */
    1505 GIC         563 :         CHECK_FOR_INTERRUPTS();
    1506                 :     }
    1507                 : }
    1508                 : 
    1509                 : static void
    1510 CBC          72 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
    1511                 : {
    1512              72 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    1513                 :     int         i;
    1514 ECB             :     HashMemoryChunk chunk;
    1515                 :     dsa_pointer chunk_s;
    1516                 : 
    1517 GNC          72 :     Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    1518                 : 
    1519                 :     /*
     1520                 :      * It's unlikely, but we need to be prepared for new participants to show
     1521 ECB             :      * up while we're in the middle of this operation, so we need to switch
     1522                 :      * on the barrier phase here.
    1523                 :      */
    1524 CBC          72 :     switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
    1525 ECB             :     {
    1526 GNC          72 :         case PHJ_GROW_BUCKETS_ELECT:
    1527                 :             /* Elect one participant to prepare to increase nbuckets. */
    1528 CBC          72 :             if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1529 ECB             :                                      WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
    1530                 :             {
    1531                 :                 size_t      size;
    1532                 :                 dsa_pointer_atomic *buckets;
    1533                 : 
    1534                 :                 /* Double the size of the bucket array. */
    1535 GIC          54 :                 pstate->nbuckets *= 2;
    1536              54 :                 size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
    1537 CBC          54 :                 hashtable->batches[0].shared->size += size / 2;
    1538 GIC          54 :                 dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
    1539             108 :                 hashtable->batches[0].shared->buckets =
    1540              54 :                     dsa_allocate(hashtable->area, size);
    1541                 :                 buckets = (dsa_pointer_atomic *)
    1542              54 :                     dsa_get_address(hashtable->area,
    1543 CBC          54 :                                     hashtable->batches[0].shared->buckets);
    1544 GIC      466998 :                 for (i = 0; i < pstate->nbuckets; ++i)
    1545          466944 :                     dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    1546                 : 
    1547 ECB             :                 /* Put the chunk list onto the work queue. */
    1548 GIC          54 :                 pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
    1549 ECB             : 
    1550                 :                 /* Clear the flag. */
    1551 CBC          54 :                 pstate->growth = PHJ_GROWTH_OK;
    1552                 :             }
    1553 ECB             :             /* Fall through. */
    1554                 : 
    1555                 :         case PHJ_GROW_BUCKETS_REALLOCATE:
    1556                 :             /* Wait for the above to complete. */
    1557 CBC          72 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1558                 :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
    1559                 :             /* Fall through. */
    1560                 : 
    1561 GNC          72 :         case PHJ_GROW_BUCKETS_REINSERT:
    1562 ECB             :             /* Reinsert all tuples into the hash table. */
    1563 GIC          72 :             ExecParallelHashEnsureBatchAccessors(hashtable);
    1564 CBC          72 :             ExecParallelHashTableSetCurrentBatch(hashtable, 0);
    1565 GIC         487 :             while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
    1566                 :             {
    1567 CBC         343 :                 size_t      idx = 0;
    1568                 : 
    1569 GIC      280917 :                 while (idx < chunk->used)
    1570                 :                 {
    1571 CBC      280574 :                     HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
    1572 GIC      280574 :                     dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
    1573                 :                     int         bucketno;
    1574                 :                     int         batchno;
    1575                 : 
    1576 CBC      280574 :                     ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
    1577                 :                                               &bucketno, &batchno);
    1578          280574 :                     Assert(batchno == 0);
    1579                 : 
    1580                 :                     /* add the tuple to the proper bucket */
    1581          280574 :                     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1582                 :                                               hashTuple, shared);
    1583                 : 
    1584                 :                     /* advance index past the tuple */
    1585 GIC      280574 :                     idx += MAXALIGN(HJTUPLE_OVERHEAD +
    1586                 :                                     HJTUPLE_MINTUPLE(hashTuple)->t_len);
    1587                 :                 }
    1588                 : 
    1589                 :                 /* allow this loop to be cancellable */
    1590             343 :                 CHECK_FOR_INTERRUPTS();
    1591                 :             }
    1592              72 :             BarrierArriveAndWait(&pstate->grow_buckets_barrier,
    1593                 :                                  WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
    1594                 :     }
    1595 CBC          72 : }
    1596                 : 
    1597                 : /*
    1598                 :  * ExecHashTableInsert
     1599                 :  *      insert a tuple into the hash table; depending on the hash value,
     1600 ECB             :  *      it may just go to a temp file for later batches
    1601                 :  *
    1602                 :  * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
    1603                 :  * tuple; the minimal case in particular is certain to happen while reloading
    1604                 :  * tuples from batch files.  We could save some cycles in the regular-tuple
    1605                 :  * case by not forcing the slot contents into minimal form; not clear if it's
    1606                 :  * worth the messiness required.
    1607                 :  */
    1608                 : void
    1609 GIC     7002056 : ExecHashTableInsert(HashJoinTable hashtable,
    1610 ECB             :                     TupleTableSlot *slot,
    1611                 :                     uint32 hashvalue)
    1612                 : {
    1613                 :     bool        shouldFree;
    1614 GIC     7002056 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1615                 :     int         bucketno;
    1616                 :     int         batchno;
    1617 ECB             : 
    1618 GIC     7002056 :     ExecHashGetBucketAndBatch(hashtable, hashvalue,
    1619                 :                               &bucketno, &batchno);
    1620 ECB             : 
    1621                 :     /*
    1622                 :      * decide whether to put the tuple in the hash table or a temp file
    1623                 :      */
    1624 CBC     7002056 :     if (batchno == hashtable->curbatch)
    1625                 :     {
    1626                 :         /*
     1627                 :          * put the tuple in the hash table
    1628                 :          */
    1629                 :         HashJoinTuple hashTuple;
    1630                 :         int         hashTupleSize;
    1631 GIC     5488540 :         double      ntuples = (hashtable->totalTuples - hashtable->skewTuples);
    1632 ECB             : 
    1633                 :         /* Create the HashJoinTuple */
    1634 GIC     5488540 :         hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    1635 CBC     5488540 :         hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
    1636 ECB             : 
    1637 GIC     5488540 :         hashTuple->hashvalue = hashvalue;
    1638         5488540 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1639                 : 
    1640                 :         /*
    1641                 :          * We always reset the tuple-matched flag on insertion.  This is okay
    1642                 :          * even when reloading a tuple from a batch file, since the tuple
    1643 ECB             :          * could not possibly have been matched to an outer tuple before it
    1644                 :          * went into the batch file.
    1645                 :          */
    1646 GIC     5488540 :         HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1647 ECB             : 
    1648                 :         /* Push it onto the front of the bucket's list */
    1649 GIC     5488540 :         hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    1650 CBC     5488540 :         hashtable->buckets.unshared[bucketno] = hashTuple;
    1651 ECB             : 
    1652                 :         /*
    1653                 :          * Increase the (optimal) number of buckets if we just exceeded the
    1654                 :          * NTUP_PER_BUCKET threshold, but only when there's still a single
    1655                 :          * batch.
    1656                 :          */
    1657 CBC     5488540 :         if (hashtable->nbatch == 1 &&
    1658         3763913 :             ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
    1659 ECB             :         {
    1660                 :             /* Guard against integer overflow and alloc size overflow */
    1661 CBC         147 :             if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
    1662             147 :                 hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
    1663                 :             {
    1664 GIC         147 :                 hashtable->nbuckets_optimal *= 2;
    1665             147 :                 hashtable->log2_nbuckets_optimal += 1;
    1666                 :             }
    1667                 :         }
    1668                 : 
    1669 ECB             :         /* Account for space used, and back off if we've used too much */
    1670 CBC     5488540 :         hashtable->spaceUsed += hashTupleSize;
    1671 GIC     5488540 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    1672 CBC     4220988 :             hashtable->spacePeak = hashtable->spaceUsed;
    1673 GIC     5488540 :         if (hashtable->spaceUsed +
    1674         5488540 :             hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
    1675 CBC     5488540 :             > hashtable->spaceAllowed)
    1676          380031 :             ExecHashIncreaseNumBatches(hashtable);
    1677 ECB             :     }
    1678                 :     else
    1679                 :     {
    1680                 :         /*
    1681                 :          * put the tuple into a temp file for later batches
    1682                 :          */
    1683 GIC     1513516 :         Assert(batchno > hashtable->curbatch);
    1684 CBC     1513516 :         ExecHashJoinSaveTuple(tuple,
    1685                 :                               hashvalue,
    1686 GIC     1513516 :                               &hashtable->innerBatchFile[batchno]);
    1687                 :     }
    1688                 : 
    1689 CBC     7002056 :     if (shouldFree)
    1690 GIC     5318264 :         heap_free_minimal_tuple(tuple);
    1691         7002056 : }
    1692                 : 
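                          : /*
                          :  * A worked example of the two growth triggers above, with
                          :  * illustrative numbers (assuming NTUP_PER_BUCKET is 1 and that
                          :  * HashJoinTuple is a pointer, i.e. 8 bytes on a 64-bit build):
                          :  *
                          :  * - nbuckets_optimal = 1024 and the 1025th tuple arrives while we
                          :  *   are still in a single batch: 1025 > 1024 * 1, so
                          :  *   nbuckets_optimal doubles to 2048 and log2_nbuckets_optimal
                          :  *   becomes 11.
                          :  *
                          :  * - spaceAllowed = 4 MB and spaceUsed has reached 4 MB - 10 kB: the
                          :  *   projected bucket array costs 2048 * 8 bytes = 16 kB, so
                          :  *   spaceUsed plus the bucket array exceeds spaceAllowed and
                          :  *   ExecHashIncreaseNumBatches() is called to double nbatch.
                          :  */
                          : 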
    1693                 : /*
    1694 ECB             :  * ExecParallelHashTableInsert
    1695                 :  *      insert a tuple into a shared hash table or shared batch tuplestore
    1696                 :  */
    1697                 : void
    1698 GIC     1080060 : ExecParallelHashTableInsert(HashJoinTable hashtable,
    1699                 :                             TupleTableSlot *slot,
    1700                 :                             uint32 hashvalue)
    1701                 : {
    1702 ECB             :     bool        shouldFree;
    1703 GIC     1080060 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1704 ECB             :     dsa_pointer shared;
    1705                 :     int         bucketno;
    1706                 :     int         batchno;
    1707                 : 
    1708 CBC         183 : retry:
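                          :     /*
                          :      * We can come back here via "goto retry" below: if loading into
                          :      * memory or preallocating batch space fails because the number
                          :      * of batches has grown concurrently, this tuple's batch
                          :      * assignment may have changed, so recompute bucketno/batchno
                          :      * and try again.
                          :      */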
    1709 GIC     1080243 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1710                 : 
    1711 CBC     1080243 :     if (batchno == 0)
    1712 ECB             :     {
    1713                 :         HashJoinTuple hashTuple;
    1714                 : 
    1715                 :         /* Try to load it into memory. */
    1716 GIC      623502 :         Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
    1717                 :                PHJ_BUILD_HASH_INNER);
    1718          623502 :         hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1719          623502 :                                                HJTUPLE_OVERHEAD + tuple->t_len,
    1720 ECB             :                                                &shared);
    1721 GIC      623502 :         if (hashTuple == NULL)
    1722 CBC         164 :             goto retry;
    1723                 : 
    1724                 :         /* Store the hash value in the HashJoinTuple header. */
    1725          623338 :         hashTuple->hashvalue = hashvalue;
    1726 GIC      623338 :         memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1727 ECB             : 
    1728                 :         /* Push it onto the front of the bucket's list */
    1729 GIC      623338 :         ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1730                 :                                   hashTuple, shared);
    1731 ECB             :     }
    1732                 :     else
    1733                 :     {
    1734 GIC      456741 :         size_t      tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
    1735                 : 
    1736 CBC      456741 :         Assert(batchno > 0);
    1737                 : 
    1738 ECB             :         /* Try to preallocate space in the batch if necessary. */
    1739 CBC      456741 :         if (hashtable->batches[batchno].preallocated < tuple_size)
    1740 ECB             :         {
    1741 GIC         865 :             if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
    1742              19 :                 goto retry;
    1743                 :         }
    1744                 : 
    1745          456722 :         Assert(hashtable->batches[batchno].preallocated >= tuple_size);
    1746          456722 :         hashtable->batches[batchno].preallocated -= tuple_size;
    1747          456722 :         sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
    1748                 :                      tuple);
    1749 ECB             :     }
    1750 GIC     1080060 :     ++hashtable->batches[batchno].ntuples;
    1751                 : 
    1752         1080060 :     if (shouldFree)
    1753         1080060 :         heap_free_minimal_tuple(tuple);
    1754 CBC     1080060 : }
    1755                 : 
    1756                 : /*
    1757                 :  * Insert a tuple into the current hash table.  Unlike
    1758                 :  * ExecParallelHashTableInsert, this version is not prepared to send the tuple
    1759                 :  * to other batches or to run out of memory, and should only be called with
    1760 ECB             :  * tuples that belong in the current batch once growth has been disabled.
    1761                 :  */
    1762                 : void
    1763 CBC      542037 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
    1764                 :                                         TupleTableSlot *slot,
    1765 ECB             :                                         uint32 hashvalue)
    1766                 : {
    1767                 :     bool        shouldFree;
    1768 CBC      542037 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    1769                 :     HashJoinTuple hashTuple;
    1770                 :     dsa_pointer shared;
    1771 ECB             :     int         batchno;
    1772 EUB             :     int         bucketno;
    1773 ECB             : 
    1774 GIC      542037 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    1775          542037 :     Assert(batchno == hashtable->curbatch);
    1776          542037 :     hashTuple = ExecParallelHashTupleAlloc(hashtable,
    1777          542037 :                                            HJTUPLE_OVERHEAD + tuple->t_len,
    1778                 :                                            &shared);
    1779          542037 :     hashTuple->hashvalue = hashvalue;
    1780          542037 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    1781          542037 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    1782          542037 :     ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
    1783                 :                               hashTuple, shared);
    1784                 : 
    1785          542037 :     if (shouldFree)
    1786 UIC           0 :         heap_free_minimal_tuple(tuple);
    1787 GIC      542037 : }
    1788                 : 
    1789                 : /*
    1790                 :  * ExecHashGetHashValue
    1791                 :  *      Compute the hash value for a tuple
    1792                 :  *
    1793 ECB             :  * The tuple to be tested must be in econtext->ecxt_outertuple (thus Vars in
    1794                 :  * the hashkeys expressions need to have OUTER_VAR as varno). If outer_tuple
    1795                 :  * is false (meaning it's the HashJoin's inner node, Hash), econtext,
    1796                 :  * hashkeys, and slot need to be from Hash, with hashkeys/slot referencing and
    1797                 :  * being suitable for tuples from the node below the Hash. Conversely, if
    1798                 :  * outer_tuple is true, econtext is from HashJoin, and hashkeys/slot need to
    1799                 :  * be appropriate for tuples from HashJoin's outer node.
    1800                 :  *
    1801                 :  * A true result means the tuple's hash value has been successfully computed
    1802                 :  * and stored at *hashvalue.  A false result means the tuple cannot match
    1803                 :  * because it contains a null attribute, and hence it should be discarded
    1804                 :  * immediately.  (If keep_nulls is true then false is never returned.)
    1805                 :  */
    1806                 : bool
    1807 GIC    14391751 : ExecHashGetHashValue(HashJoinTable hashtable,
    1808                 :                      ExprContext *econtext,
    1809                 :                      List *hashkeys,
    1810 ECB             :                      bool outer_tuple,
    1811                 :                      bool keep_nulls,
    1812                 :                      uint32 *hashvalue)
    1813                 : {
    1814 CBC    14391751 :     uint32      hashkey = 0;
    1815 ECB             :     FmgrInfo   *hashfunctions;
    1816                 :     ListCell   *hk;
    1817 CBC    14391751 :     int         i = 0;
    1818                 :     MemoryContext oldContext;
    1819 ECB             : 
    1820                 :     /*
    1821                 :      * We reset the eval context each time to reclaim any memory leaked in the
    1822                 :      * hashkey expressions.
    1823                 :      */
    1824 GIC    14391751 :     ResetExprContext(econtext);
    1825                 : 
    1826 CBC    14391751 :     oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
    1827                 : 
    1828 GIC    14391751 :     if (outer_tuple)
    1829         7977936 :         hashfunctions = hashtable->outer_hashfunctions;
    1830                 :     else
    1831 CBC     6413815 :         hashfunctions = hashtable->inner_hashfunctions;
    1832                 : 
    1833 GIC    29586856 :     foreach(hk, hashkeys)
    1834                 :     {
    1835        15195518 :         ExprState  *keyexpr = (ExprState *) lfirst(hk);
    1836                 :         Datum       keyval;
    1837                 :         bool        isNull;
    1838                 : 
    1839                 :         /* combine successive hashkeys by rotating */
    1840        15195518 :         hashkey = pg_rotate_left32(hashkey, 1);
    1841                 : 
    1842                 :         /*
    1843                 :          * Get the join attribute value of the tuple
    1844                 :          */
    1845        15195518 :         keyval = ExecEvalExpr(keyexpr, econtext, &isNull);
    1846 ECB             : 
    1847                 :         /*
    1848                 :          * If the attribute is NULL, and the join operator is strict, then
    1849                 :          * this tuple cannot pass the join qual so we can reject it
    1850                 :          * immediately (unless we're scanning the outside of an outer join, in
    1851                 :          * which case we must not reject it).  Otherwise we act like the
    1852                 :          * hashcode of NULL is zero (this will support operators that act like
    1853                 :          * IS NOT DISTINCT, though not any more-random behavior).  We treat
    1854                 :          * the hash support function as strict even if the operator is not.
    1855                 :          *
    1856                 :          * Note: currently, all hashjoinable operators must be strict since
    1857                 :          * the hash index AM assumes that.  However, it takes so little extra
    1858                 :          * code here to allow non-strict that we may as well do it.
    1859                 :          */
    1860 CBC    15195518 :         if (isNull)
    1861 ECB             :         {
    1862 GIC         529 :             if (hashtable->hashStrict[i] && !keep_nulls)
    1863                 :             {
    1864 CBC         413 :                 MemoryContextSwitchTo(oldContext);
    1865 GIC         413 :                 return false;   /* cannot match */
    1866                 :             }
    1867 ECB             :             /* else, leave hashkey unmodified, equivalent to hashcode 0 */
    1868                 :         }
    1869                 :         else
    1870                 :         {
    1871                 :             /* Compute the hash function */
    1872                 :             uint32      hkey;
    1873                 : 
    1874 GIC    15194989 :             hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->collations[i], keyval));
    1875        15194989 :             hashkey ^= hkey;
    1876                 :         }
    1877                 : 
    1878        15195105 :         i++;
    1879                 :     }
    1880                 : 
    1881        14391338 :     MemoryContextSwitchTo(oldContext);
    1882                 : 
    1883        14391338 :     *hashvalue = hashkey;
    1884        14391338 :     return true;
    1885                 : }
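/*
 * Illustrative sketch, not part of nodeHash.c: how the loop above combines
 * per-column hashes.  rotate_left32 mirrors pg_rotate_left32, and the
 * column_hashes array stands in for the FunctionCall1Coll results, with 0
 * contributed for any NULL column.
 */
#include <stdint.h>

static inline uint32_t
rotate_left32(uint32_t word, int n)
{
    return (word << n) | (word >> (32 - n));
}

static uint32_t
combine_hashkeys(const uint32_t *column_hashes, int nkeys)
{
    uint32_t    hashkey = 0;

    for (int i = 0; i < nkeys; i++)
    {
        hashkey = rotate_left32(hashkey, 1);    /* combine by rotating */
        hashkey ^= column_hashes[i];            /* then XOR in this key's hash */
    }
    return hashkey;
}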
    1886                 : 
    1887                 : /*
    1888                 :  * ExecHashGetBucketAndBatch
    1889                 :  *      Determine the bucket number and batch number for a hash value
    1890                 :  *
    1891                 :  * Note: on-the-fly increases of nbatch must not change the bucket number
    1892                 :  * for a given hash code (since we don't move tuples to different hash
    1893                 :  * chains), and must only cause the batch number to remain the same or
    1894                 :  * increase.  Our algorithm is
    1895                 :  *      bucketno = hashvalue MOD nbuckets
    1896                 :  *      batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
    1897                 :  * where nbuckets and nbatch are both expected to be powers of 2, so we can
    1898                 :  * do the computations by shifting and masking.  (This assumes that all hash
    1899                 :  * functions are good about randomizing all their output bits, else we are
    1900                 :  * likely to have very skewed bucket or batch occupancy.)
    1901 ECB             :  *
    1902                 :  * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
    1903                 :  * bucket count growth.  Once we start batching, these values are fixed and do
    1904                 :  * not change over the course of the join (making it possible to compute batch
    1905                 :  * number the way we do here).
    1906                 :  *
    1907                 :  * nbatch is always a power of 2; we increase it only by doubling it.  This
    1908                 :  * effectively adds one more bit to the top of the batchno.  In very large
    1909                 :  * joins, we might run out of bits to add, so we do this by rotating the hash
    1910                 :  * value.  This causes batchno to steal bits from bucketno when the number of
    1911                 :  * virtual buckets exceeds 2^32.  It's better to have longer bucket chains
    1912                 :  * than to lose the ability to divide batches.
    1913                 :  */
    1914                 : void
    1915 GIC    19072646 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
    1916                 :                           uint32 hashvalue,
    1917 ECB             :                           int *bucketno,
    1918                 :                           int *batchno)
    1919                 : {
    1920 CBC    19072646 :     uint32      nbuckets = (uint32) hashtable->nbuckets;
    1921 GIC    19072646 :     uint32      nbatch = (uint32) hashtable->nbatch;
    1922                 : 
    1923        19072646 :     if (nbatch > 1)
    1924                 :     {
    1925         7629697 :         *bucketno = hashvalue & (nbuckets - 1);
    1926         7629697 :         *batchno = pg_rotate_right32(hashvalue,
    1927         7629697 :                                      hashtable->log2_nbuckets) & (nbatch - 1);
    1928                 :     }
    1929                 :     else
    1930                 :     {
    1931        11442949 :         *bucketno = hashvalue & (nbuckets - 1);
    1932        11442949 :         *batchno = 0;
    1933 ECB             :     }
    1934 GIC    19072646 : }
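/*
 * Worked example, for illustration only: with nbuckets = 1024
 * (log2_nbuckets = 10) and nbatch = 4, bucketno is the low 10 bits of the
 * hash value and batchno is the next 2 bits up, extracted by rotating
 * right and masking exactly as above.
 */
#include <stdint.h>

static inline uint32_t
rotate_right32(uint32_t word, int n)    /* mirrors pg_rotate_right32 */
{
    return (word >> n) | (word << (32 - n));
}

static void
example_bucket_and_batch(uint32_t hashvalue, int *bucketno, int *batchno)
{
    const uint32_t nbuckets = 1024;     /* assumed power of 2 */
    const uint32_t nbatch = 4;          /* assumed power of 2 */

    *bucketno = (int) (hashvalue & (nbuckets - 1));                     /* bits 0..9 */
    *batchno = (int) (rotate_right32(hashvalue, 10) & (nbatch - 1));    /* bits 10..11 */
}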
    1935                 : 
    1936 ECB             : /*
    1937                 :  * ExecScanHashBucket
    1938                 :  *      scan a hash bucket for matches to the current outer tuple
    1939                 :  *
    1940                 :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    1941                 :  *
    1942                 :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    1943                 :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    1944                 :  * for the latter.
    1945                 :  */
    1946                 : bool
    1947 GIC     8962526 : ExecScanHashBucket(HashJoinState *hjstate,
    1948 ECB             :                    ExprContext *econtext)
    1949                 : {
    1950 CBC     8962526 :     ExprState  *hjclauses = hjstate->hashclauses;
    1951         8962526 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    1952 GIC     8962526 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    1953 CBC     8962526 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    1954                 : 
    1955 ECB             :     /*
    1956                 :      * hj_CurTuple is the address of the tuple last returned from the current
    1957                 :      * bucket, or NULL if it's time to start scanning a new bucket.
    1958                 :      *
    1959                 :      * If the tuple hashed to a skew bucket then scan the skew bucket
    1960                 :      * otherwise scan the standard hashtable bucket.
    1961                 :      */
    1962 CBC     8962526 :     if (hashTuple != NULL)
    1963 GIC     2065009 :         hashTuple = hashTuple->next.unshared;
    1964         6897517 :     else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
    1965 CBC        1200 :         hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
    1966                 :     else
    1967         6896317 :         hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    1968                 : 
    1969        11510422 :     while (hashTuple != NULL)
    1970 ECB             :     {
    1971 GIC     6665623 :         if (hashTuple->hashvalue == hashvalue)
    1972                 :         {
    1973                 :             TupleTableSlot *inntuple;
    1974 ECB             : 
    1975                 :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    1976 GIC     4117730 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    1977                 :                                              hjstate->hj_HashTupleSlot,
    1978                 :                                              false);    /* do not pfree */
    1979         4117730 :             econtext->ecxt_innertuple = inntuple;
    1980 ECB             : 
    1981 GIC     4117730 :             if (ExecQualAndReset(hjclauses, econtext))
    1982                 :             {
    1983         4117727 :                 hjstate->hj_CurTuple = hashTuple;
    1984         4117727 :                 return true;
    1985                 :             }
    1986                 :         }
    1987                 : 
    1988         2547896 :         hashTuple = hashTuple->next.unshared;
    1989                 :     }
    1990                 : 
    1991                 :     /*
    1992                 :      * no match
    1993                 :      */
    1994 CBC     4844799 :     return false;
    1995                 : }
    1996                 : 
    1997 ECB             : /*
    1998                 :  * ExecParallelScanHashBucket
    1999                 :  *      scan a hash bucket for matches to the current outer tuple
    2000                 :  *
    2001                 :  * The current outer tuple must be stored in econtext->ecxt_outertuple.
    2002                 :  *
    2003                 :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2004                 :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2005                 :  * for the latter.
    2006                 :  */
    2007                 : bool
    2008 GIC     2100024 : ExecParallelScanHashBucket(HashJoinState *hjstate,
    2009 ECB             :                            ExprContext *econtext)
    2010                 : {
    2011 GIC     2100024 :     ExprState  *hjclauses = hjstate->hashclauses;
    2012 CBC     2100024 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2013 GIC     2100024 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2014 CBC     2100024 :     uint32      hashvalue = hjstate->hj_CurHashValue;
    2015                 : 
    2016                 :     /*
    2017                 :      * hj_CurTuple is the address of the tuple last returned from the current
    2018                 :      * bucket, or NULL if it's time to start scanning a new bucket.
    2019 ECB             :      */
    2020 GIC     2100024 :     if (hashTuple != NULL)
    2021         1020012 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2022 ECB             :     else
    2023 GIC     1080012 :         hashTuple = ExecParallelHashFirstTuple(hashtable,
    2024 ECB             :                                                hjstate->hj_CurBucketNo);
    2025                 : 
    2026 CBC     2800064 :     while (hashTuple != NULL)
    2027 ECB             :     {
    2028 GIC     1720052 :         if (hashTuple->hashvalue == hashvalue)
    2029                 :         {
    2030                 :             TupleTableSlot *inntuple;
    2031 ECB             : 
    2032                 :             /* insert hashtable's tuple into exec slot so ExecQual sees it */
    2033 GIC     1020012 :             inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2034                 :                                              hjstate->hj_HashTupleSlot,
    2035                 :                                              false);    /* do not pfree */
    2036         1020012 :             econtext->ecxt_innertuple = inntuple;
    2037 ECB             : 
    2038 GIC     1020012 :             if (ExecQualAndReset(hjclauses, econtext))
    2039                 :             {
    2040         1020012 :                 hjstate->hj_CurTuple = hashTuple;
    2041         1020012 :                 return true;
    2042                 :             }
    2043                 :         }
    2044                 : 
    2045 CBC      700040 :         hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2046                 :     }
    2047                 : 
    2048                 :     /*
    2049                 :      * no match
    2050                 :      */
    2051 GIC     1080012 :     return false;
    2052                 : }
    2053                 : 
    2054                 : /*
    2055 ECB             :  * ExecPrepHashTableForUnmatched
    2056                 :  *      set up for a series of ExecScanHashTableForUnmatched calls
    2057                 :  */
    2058                 : void
    2059 GIC        2414 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
    2060                 : {
    2061                 :     /*----------
    2062                 :      * During this scan we use the HashJoinState fields as follows:
    2063                 :      *
    2064                 :      * hj_CurBucketNo: next regular bucket to scan
    2065                 :      * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
    2066 ECB             :      * hj_CurTuple: last tuple returned, or NULL to start next bucket
    2067                 :      *----------
    2068                 :      */
    2069 CBC        2414 :     hjstate->hj_CurBucketNo = 0;
    2070            2414 :     hjstate->hj_CurSkewBucketNo = 0;
    2071 GIC        2414 :     hjstate->hj_CurTuple = NULL;
    2072 CBC        2414 : }
    2073                 : 
    2074                 : /*
    2075                 :  * Decide if this process is allowed to run the unmatched scan.  If so, the
    2076                 :  * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
    2077                 :  * Otherwise the batch is detached and false is returned.
    2078                 :  */
    2079                 : bool
    2080 GNC          43 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
    2081                 : {
    2082              43 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2083              43 :     int         curbatch = hashtable->curbatch;
    2084              43 :     ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    2085                 : 
    2086              43 :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
    2087                 : 
    2088                 :     /*
    2089                 :      * It would not be deadlock-free to wait on the batch barrier, because it
    2090                 :      * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
    2091                 :      * already emitted tuples.  Therefore, we'll hold a wait-free election:
    2092                 :      * only one process can continue to the next phase, and all others detach
    2093                 :      * from this batch.  They can still go any work on other batches, if there
    2094                 :      * from this batch.  They can still do any work on other batches, if there
    2095                 :      */
    2096              43 :     if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
    2097                 :     {
    2098                 :         /* This process considers the batch to be done. */
    2099              13 :         hashtable->batches[hashtable->curbatch].done = true;
    2100                 : 
    2101                 :         /* Make sure any temporary files are closed. */
    2102              13 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    2103              13 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    2104                 : 
    2105                 :         /*
    2106                 :          * Track largest batch we've seen, which would normally happen in
    2107                 :          * ExecHashTableDetachBatch().
    2108                 :          */
    2109              13 :         hashtable->spacePeak =
    2110              13 :             Max(hashtable->spacePeak,
    2111                 :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    2112              13 :         hashtable->curbatch = -1;
    2113              13 :         return false;
    2114                 :     }
    2115                 : 
    2116                 :     /* Now we are alone with this batch. */
    2117              30 :     Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    2118              30 :     Assert(BarrierParticipants(&batch->batch_barrier) == 1);
    2119                 : 
    2120                 :     /*
    2121                 :      * Has another process decided to give up early and command all processes
    2122                 :      * to skip the unmatched scan?
    2123                 :      */
    2124              30 :     if (batch->skip_unmatched)
    2125                 :     {
    2126 UNC           0 :         hashtable->batches[hashtable->curbatch].done = true;
    2127               0 :         ExecHashTableDetachBatch(hashtable);
    2128               0 :         return false;
    2129                 :     }
    2130                 : 
    2131                 :     /* Now prepare the process local state, just as for non-parallel join. */
    2132 GNC          30 :     ExecPrepHashTableForUnmatched(hjstate);
    2133                 : 
    2134              30 :     return true;
    2135                 : }
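/*
 * Simplified analogue of the wait-free election above, assuming a plain
 * C11 atomic participant count in place of the real barrier machinery:
 * each arriving process decrements the count, and only the one that finds
 * it was last wins the right to run the serial scan phase.  This sketches
 * the idea behind BarrierArriveAndDetachExceptLast, not its implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool
elect_last_arriver(atomic_int *nparticipants)
{
    /* the previous value was 1 iff we were the last attached participant */
    return atomic_fetch_sub(nparticipants, 1) == 1;
}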
    2136                 : 
    2137                 : /*
    2138                 :  * ExecScanHashTableForUnmatched
    2139                 :  *      scan the hash table for unmatched inner tuples
    2140                 :  *
    2141                 :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2142                 :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2143                 :  * for the latter.
    2144                 :  */
    2145 ECB             : bool
    2146 GIC      375958 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
    2147                 : {
    2148 CBC      375958 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2149 GIC      375958 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2150                 : 
    2151 ECB             :     for (;;)
    2152                 :     {
    2153                 :         /*
    2154                 :          * hj_CurTuple is the address of the tuple last returned from the
    2155                 :          * current bucket, or NULL if it's time to start scanning a new
    2156                 :          * bucket.
    2157                 :          */
    2158 CBC     3287190 :         if (hashTuple != NULL)
    2159          373574 :             hashTuple = hashTuple->next.unshared;
    2160 GIC     2913616 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2161 ECB             :         {
    2162 CBC     2911235 :             hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
    2163 GIC     2911235 :             hjstate->hj_CurBucketNo++;
    2164                 :         }
    2165            2381 :         else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
    2166 ECB             :         {
    2167 LBC           0 :             int         j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
    2168                 : 
    2169 UIC           0 :             hashTuple = hashtable->skewBucket[j]->tuples;
    2170               0 :             hjstate->hj_CurSkewBucketNo++;
    2171                 :         }
    2172                 :         else
    2173 CBC        2381 :             break;              /* finished all buckets */
    2174                 : 
    2175 GBC     3469045 :         while (hashTuple != NULL)
    2176 EUB             :         {
    2177 GBC      557813 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2178                 :             {
    2179                 :                 TupleTableSlot *inntuple;
    2180                 : 
    2181 ECB             :                 /* insert hashtable's tuple into exec slot */
    2182 GIC      373577 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2183 ECB             :                                                  hjstate->hj_HashTupleSlot,
    2184                 :                                                  false);    /* do not pfree */
    2185 GIC      373577 :                 econtext->ecxt_innertuple = inntuple;
    2186                 : 
    2187                 :                 /*
    2188                 :                  * Reset temp memory each time; although this function doesn't
    2189                 :                  * do any qual eval, the caller will, so let's keep it
    2190                 :                  * parallel to ExecScanHashBucket.
    2191                 :                  */
    2192          373577 :                 ResetExprContext(econtext);
    2193                 : 
    2194          373577 :                 hjstate->hj_CurTuple = hashTuple;
    2195 CBC      373577 :                 return true;
    2196                 :             }
    2197 ECB             : 
    2198 CBC      184236 :             hashTuple = hashTuple->next.unshared;
    2199                 :         }
    2200                 : 
    2201                 :         /* allow this loop to be cancellable */
    2202 GIC     2911232 :         CHECK_FOR_INTERRUPTS();
    2203                 :     }
    2204                 : 
    2205                 :     /*
    2206                 :      * no more unmatched tuples
    2207 ECB             :      */
    2208 CBC        2381 :     return false;
    2209 ECB             : }
    2210                 : 
    2211                 : /*
    2212                 :  * ExecParallelScanHashTableForUnmatched
    2213                 :  *      scan the hash table for unmatched inner tuples, in parallel join
    2214                 :  *
    2215                 :  * On success, the inner tuple is stored into hjstate->hj_CurTuple and
    2216                 :  * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
    2217                 :  * for the latter.
    2218                 :  */
    2219                 : bool
    2220 GNC       60030 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
    2221                 :                                       ExprContext *econtext)
    2222                 : {
    2223           60030 :     HashJoinTable hashtable = hjstate->hj_HashTable;
    2224           60030 :     HashJoinTuple hashTuple = hjstate->hj_CurTuple;
    2225                 : 
    2226                 :     for (;;)
    2227                 :     {
    2228                 :         /*
    2229                 :          * hj_CurTuple is the address of the tuple last returned from the
    2230                 :          * current bucket, or NULL if it's time to start scanning a new
    2231                 :          * bucket.
    2232                 :          */
    2233          354942 :         if (hashTuple != NULL)
    2234           60000 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2235          294942 :         else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
    2236          294912 :             hashTuple = ExecParallelHashFirstTuple(hashtable,
    2237          294912 :                                                    hjstate->hj_CurBucketNo++);
    2238                 :         else
    2239              30 :             break;              /* finished all buckets */
    2240                 : 
    2241          474912 :         while (hashTuple != NULL)
    2242                 :         {
    2243          180000 :             if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
    2244                 :             {
    2245                 :                 TupleTableSlot *inntuple;
    2246                 : 
    2247                 :                 /* insert hashtable's tuple into exec slot */
    2248           60000 :                 inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
    2249                 :                                                  hjstate->hj_HashTupleSlot,
    2250                 :                                                  false);    /* do not pfree */
    2251           60000 :                 econtext->ecxt_innertuple = inntuple;
    2252                 : 
    2253                 :                 /*
    2254                 :                  * Reset temp memory each time; although this function doesn't
    2255                 :                  * do any qual eval, the caller will, so let's keep it
    2256                 :                  * parallel to ExecScanHashBucket.
    2257                 :                  */
    2258           60000 :                 ResetExprContext(econtext);
    2259                 : 
    2260           60000 :                 hjstate->hj_CurTuple = hashTuple;
    2261           60000 :                 return true;
    2262                 :             }
    2263                 : 
    2264          120000 :             hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
    2265                 :         }
    2266                 : 
    2267                 :         /* allow this loop to be cancellable */
    2268          294912 :         CHECK_FOR_INTERRUPTS();
    2269                 :     }
    2270                 : 
    2271                 :     /*
    2272                 :      * no more unmatched tuples
    2273                 :      */
    2274              30 :     return false;
    2275                 : }
    2276                 : 
    2277 ECB             : /*
    2278                 :  * ExecHashTableReset
    2279                 :  *
    2280                 :  *      reset hash table header for new batch
    2281                 :  */
    2282 EUB             : void
    2283 GIC         752 : ExecHashTableReset(HashJoinTable hashtable)
    2284 EUB             : {
    2285                 :     MemoryContext oldcxt;
    2286 GIC         752 :     int         nbuckets = hashtable->nbuckets;
    2287                 : 
    2288 ECB             :     /*
    2289                 :      * Release all the hash buckets and tuples acquired in the prior pass, and
    2290                 :      * reinitialize the context for a new pass.
    2291                 :      */
    2292 CBC         752 :     MemoryContextReset(hashtable->batchCxt);
    2293 GIC         752 :     oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
    2294                 : 
    2295                 :     /* Reallocate and reinitialize the hash bucket headers. */
    2296 GNC         752 :     hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
    2297                 : 
    2298 GIC         752 :     hashtable->spaceUsed = 0;
    2299 ECB             : 
    2300 GIC         752 :     MemoryContextSwitchTo(oldcxt);
    2301                 : 
    2302                 :     /* Forget the chunks (the memory was freed by the context reset above). */
    2303             752 :     hashtable->chunks = NULL;
    2304             752 : }
    2305                 : 
    2306 ECB             : /*
    2307                 :  * ExecHashTableResetMatchFlags
    2308                 :  *      Clear all the HeapTupleHeaderHasMatch flags in the table
    2309                 :  */
    2310                 : void
    2311 GIC           3 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
    2312 ECB             : {
    2313                 :     HashJoinTuple tuple;
    2314                 :     int         i;
    2315                 : 
    2316                 :     /* Reset all flags in the main table ... */
    2317 GIC        3075 :     for (i = 0; i < hashtable->nbuckets; i++)
    2318                 :     {
    2319            3078 :         for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
    2320               6 :              tuple = tuple->next.unshared)
    2321               6 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2322 ECB             :     }
    2323                 : 
    2324                 :     /* ... and the same for the skew buckets, if any */
    2325 GIC           3 :     for (i = 0; i < hashtable->nSkewBuckets; i++)
    2326                 :     {
    2327 UIC           0 :         int         j = hashtable->skewBucketNums[i];
    2328               0 :         HashSkewBucket *skewBucket = hashtable->skewBucket[j];
    2329                 : 
    2330               0 :         for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
    2331               0 :             HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
    2332                 :     }
    2333 GIC           3 : }
    2334 ECB             : 
    2335                 : 
    2336                 : void
    2337 CBC         508 : ExecReScanHash(HashState *node)
    2338 ECB             : {
    2339 GNC         508 :     PlanState  *outerPlan = outerPlanState(node);
    2340                 : 
    2341                 :     /*
    2342                 :      * If chgParam of the subnode is not null then the plan will be
    2343                 :      * re-scanned by the first ExecProcNode.
    2344                 :      */
    2345             508 :     if (outerPlan->chgParam == NULL)
    2346 UNC           0 :         ExecReScan(outerPlan);
    2347 GIC         508 : }
    2348                 : 
    2349 ECB             : 
    2350                 : /*
    2351                 :  * ExecHashBuildSkewHash
    2352                 :  *
    2353                 :  *      Set up for skew optimization if we can identify the most common values
    2354                 :  *      (MCVs) of the outer relation's join key.  We make a skew hash bucket
    2355                 :  *      for the hash value of each MCV, up to the number of slots allowed
    2356                 :  *      based on available memory.
    2357                 :  */
    2358                 : static void
    2359 CBC          60 : ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
    2360                 : {
    2361                 :     HeapTupleData *statsTuple;
    2362                 :     AttStatsSlot sslot;
    2363                 : 
    2364 ECB             :     /* Do nothing if planner didn't identify the outer relation's join key */
    2365 GIC          60 :     if (!OidIsValid(node->skewTable))
    2366              36 :         return;
    2367 ECB             :     /* Also, do nothing if we don't have room for at least one skew bucket */
    2368 GIC          60 :     if (mcvsToUse <= 0)
    2369 UIC           0 :         return;
    2370                 : 
    2371                 :     /*
    2372                 :      * Try to find the MCV statistics for the outer relation's join key.
    2373                 :      */
    2374 CBC          60 :     statsTuple = SearchSysCache3(STATRELATTINH,
    2375                 :                                  ObjectIdGetDatum(node->skewTable),
    2376              60 :                                  Int16GetDatum(node->skewColumn),
    2377              60 :                                  BoolGetDatum(node->skewInherit));
    2378 GIC          60 :     if (!HeapTupleIsValid(statsTuple))
    2379              36 :         return;
    2380 ECB             : 
    2381 GIC          24 :     if (get_attstatsslot(&sslot, statsTuple,
    2382                 :                          STATISTIC_KIND_MCV, InvalidOid,
    2383                 :                          ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
    2384 ECB             :     {
    2385                 :         double      frac;
    2386                 :         int         nbuckets;
    2387                 :         FmgrInfo   *hashfunctions;
    2388                 :         int         i;
    2389                 : 
    2390 CBC           3 :         if (mcvsToUse > sslot.nvalues)
    2391 UIC           0 :             mcvsToUse = sslot.nvalues;
    2392                 : 
    2393                 :         /*
    2394                 :          * Calculate the expected fraction of outer relation that will
    2395                 :          * participate in the skew optimization.  If this isn't at least
    2396                 :          * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
    2397                 :          */
    2398 GIC           3 :         frac = 0;
    2399 CBC          66 :         for (i = 0; i < mcvsToUse; i++)
    2400 GIC          63 :             frac += sslot.numbers[i];
    2401               3 :         if (frac < SKEW_MIN_OUTER_FRACTION)
    2402 ECB             :         {
    2403 UIC           0 :             free_attstatsslot(&sslot);
    2404               0 :             ReleaseSysCache(statsTuple);
    2405               0 :             return;
    2406                 :         }
    2407                 : 
    2408 ECB             :         /*
    2409                 :          * Okay, set up the skew hashtable.
    2410                 :          *
    2411                 :          * skewBucket[] is an open addressing hashtable with a power of 2 size
    2412                 :          * that is greater than the number of MCV values.  (This ensures there
    2413                 :          * will be at least one null entry, so searches will always
    2414                 :          * terminate.)
    2415                 :          *
    2416                 :          * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
    2417                 :          * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
    2418                 :          * since we limit pg_statistic entries to much less than that.
    2419                 :          */
    2420 CBC           3 :         nbuckets = pg_nextpower2_32(mcvsToUse + 1);
    2421                 :         /* use two more bits just to help avoid collisions */
    2422 GIC           3 :         nbuckets <<= 2;
    2423                 : 
    2424               3 :         hashtable->skewEnabled = true;
    2425               3 :         hashtable->skewBucketLen = nbuckets;
    2426                 : 
    2427 ECB             :         /*
    2428                 :          * We allocate the bucket memory in the hashtable's batch context. It
    2429                 :          * is only needed during the first batch, and this ensures it will be
    2430                 :          * automatically removed once the first batch is done.
    2431                 :          */
    2432 GIC           3 :         hashtable->skewBucket = (HashSkewBucket **)
    2433 CBC           3 :             MemoryContextAllocZero(hashtable->batchCxt,
    2434                 :                                    nbuckets * sizeof(HashSkewBucket *));
    2435               3 :         hashtable->skewBucketNums = (int *)
    2436               3 :             MemoryContextAllocZero(hashtable->batchCxt,
    2437 ECB             :                                    mcvsToUse * sizeof(int));
    2438                 : 
    2439 GIC           3 :         hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
    2440               3 :             + mcvsToUse * sizeof(int);
    2441 CBC           3 :         hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
    2442 GIC           3 :             + mcvsToUse * sizeof(int);
    2443 GBC           3 :         if (hashtable->spaceUsed > hashtable->spacePeak)
    2444               3 :             hashtable->spacePeak = hashtable->spaceUsed;
    2445                 : 
    2446 EUB             :         /*
    2447                 :          * Create a skew bucket for each MCV hash value.
    2448                 :          *
    2449 ECB             :          * Note: it is very important that we create the buckets in order of
    2450                 :          * decreasing MCV frequency.  If we have to remove some buckets, they
    2451                 :          * must be removed in reverse order of creation (see notes in
    2452                 :          * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
    2453                 :          * be removed first.
    2454                 :          */
    2455 CBC           3 :         hashfunctions = hashtable->outer_hashfunctions;
    2456                 : 
    2457 GIC          66 :         for (i = 0; i < mcvsToUse; i++)
    2458                 :         {
    2459                 :             uint32      hashvalue;
    2460                 :             int         bucket;
    2461 ECB             : 
    2462 GBC          63 :             hashvalue = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[0],
    2463 CBC          63 :                                                          hashtable->collations[0],
    2464 GIC          63 :                                                          sslot.values[i]));
    2465                 : 
    2466                 :             /*
    2467                 :              * While we have not hit a hole in the hashtable and have not hit
    2468                 :              * the desired bucket, we have collided with some previous hash
    2469                 :              * value, so try the next bucket location.  NB: this code must
    2470                 :              * match ExecHashGetSkewBucket.
    2471                 :              */
    2472              63 :             bucket = hashvalue & (nbuckets - 1);
    2473              63 :             while (hashtable->skewBucket[bucket] != NULL &&
    2474 UIC           0 :                    hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2475 LBC           0 :                 bucket = (bucket + 1) & (nbuckets - 1);
    2476                 : 
    2477                 :             /*
    2478                 :              * If we found an existing bucket with the same hashvalue, leave
    2479                 :              * it alone.  It's okay for two MCVs to share a hashvalue.
    2480                 :              */
    2481 CBC          63 :             if (hashtable->skewBucket[bucket] != NULL)
    2482 LBC           0 :                 continue;
    2483                 : 
    2484 ECB             :             /* Okay, create a new skew bucket for this hashvalue. */
    2485 GBC         126 :             hashtable->skewBucket[bucket] = (HashSkewBucket *)
    2486 GIC          63 :                 MemoryContextAlloc(hashtable->batchCxt,
    2487                 :                                    sizeof(HashSkewBucket));
    2488              63 :             hashtable->skewBucket[bucket]->hashvalue = hashvalue;
    2489              63 :             hashtable->skewBucket[bucket]->tuples = NULL;
    2490 CBC          63 :             hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
    2491 GIC          63 :             hashtable->nSkewBuckets++;
    2492 CBC          63 :             hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
    2493              63 :             hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
    2494              63 :             if (hashtable->spaceUsed > hashtable->spacePeak)
    2495              63 :                 hashtable->spacePeak = hashtable->spaceUsed;
    2496                 :         }
    2497 ECB             : 
    2498 GIC           3 :         free_attstatsslot(&sslot);
    2499                 :     }
    2500                 : 
    2501              24 :     ReleaseSysCache(statsTuple);
    2502                 : }
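/*
 * Illustrative sizing arithmetic for the skew table built above, assuming
 * mcvsToUse = 100: pg_nextpower2_32(101) yields 128, and the two extra
 * bits give nbuckets = 512, so the open-addressed table stays below 20%
 * full and every probe sequence must eventually reach a NULL entry.  The
 * helper below is hypothetical and only mimics that computation.
 */
static int
example_skew_nbuckets(int mcvsToUse)
{
    int         nbuckets = 1;

    /* smallest power of 2 greater than mcvsToUse, like pg_nextpower2_32 */
    while (nbuckets <= mcvsToUse)
        nbuckets <<= 1;
    /* use two more bits just to help avoid collisions */
    return nbuckets << 2;
}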
    2503                 : 
    2504                 : /*
    2505                 :  * ExecHashGetSkewBucket
    2506 ECB             :  *
    2507 EUB             :  *      Returns the index of the skew bucket for this hashvalue,
    2508                 :  *      or INVALID_SKEW_BUCKET_NO if the hashvalue is not
    2509                 :  *      associated with any active skew bucket.
    2510                 :  */
    2511                 : int
    2512 GIC    14046374 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
    2513                 : {
    2514 ECB             :     int         bucket;
    2515                 : 
    2516                 :     /*
    2517                 :      * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
    2518                 :      * particular, this happens after the initial batch is done).
    2519 EUB             :      */
    2520 GBC    14046374 :     if (!hashtable->skewEnabled)
    2521        13986374 :         return INVALID_SKEW_BUCKET_NO;
    2522                 : 
    2523                 :     /*
    2524                 :      * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
    2525                 :      */
    2526 GIC       60000 :     bucket = hashvalue & (hashtable->skewBucketLen - 1);
    2527                 : 
    2528                 :     /*
    2529                 :      * While we have not hit a hole in the hashtable and have not hit the
    2530                 :      * desired bucket, we have collided with some other hash value, so try the
    2531                 :      * next bucket location.
    2532                 :      */
    2533           63915 :     while (hashtable->skewBucket[bucket] != NULL &&
    2534            5409 :            hashtable->skewBucket[bucket]->hashvalue != hashvalue)
    2535            3915 :         bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
    2536 ECB             : 
    2537                 :     /*
    2538                 :      * Found the desired bucket?
    2539                 :      */
    2540 CBC       60000 :     if (hashtable->skewBucket[bucket] != NULL)
    2541            1494 :         return bucket;
    2542                 : 
    2543                 :     /*
    2544                 :      * There must not be any hashtable entry for this hash value.
    2545                 :      */
    2546 GIC       58506 :     return INVALID_SKEW_BUCKET_NO;
    2547                 : }
    2548 ECB             : 
    2549                 : /*
    2550                 :  * ExecHashSkewTableInsert
    2551                 :  *
    2552                 :  *      Insert a tuple into the skew hashtable.
    2553                 :  *
    2554                 :  * This should generally match up with the current-batch case in
    2555                 :  * ExecHashTableInsert.
    2556                 :  */
    2557                 : static void
    2558 CBC         294 : ExecHashSkewTableInsert(HashJoinTable hashtable,
    2559 ECB             :                         TupleTableSlot *slot,
    2560                 :                         uint32 hashvalue,
    2561                 :                         int bucketNumber)
    2562                 : {
    2563                 :     bool        shouldFree;
    2564 GIC         294 :     MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
    2565                 :     HashJoinTuple hashTuple;
    2566                 :     int         hashTupleSize;
    2567                 : 
    2568                 :     /* Create the HashJoinTuple */
    2569             294 :     hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2570             294 :     hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
    2571 ECB             :                                                    hashTupleSize);
    2572 GIC         294 :     hashTuple->hashvalue = hashvalue;
    2573 CBC         294 :     memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
    2574 GIC         294 :     HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
    2575                 : 
    2576                 :     /* Push it onto the front of the skew bucket's list */
    2577             294 :     hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
    2578 CBC         294 :     hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
    2579             294 :     Assert(hashTuple != hashTuple->next.unshared);
    2580 ECB             : 
    2581                 :     /* Account for space used, and back off if we've used too much */
    2582 GIC         294 :     hashtable->spaceUsed += hashTupleSize;
    2583             294 :     hashtable->spaceUsedSkew += hashTupleSize;
    2584             294 :     if (hashtable->spaceUsed > hashtable->spacePeak)
    2585             216 :         hashtable->spacePeak = hashtable->spaceUsed;
    2586             345 :     while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
    2587              51 :         ExecHashRemoveNextSkewBucket(hashtable);
    2588 ECB             : 
    2589                 :     /* Check we are not over the total spaceAllowed, either */
    2590 GBC         294 :     if (hashtable->spaceUsed > hashtable->spaceAllowed)
    2591 UBC           0 :         ExecHashIncreaseNumBatches(hashtable);
    2592                 : 
    2593 GIC         294 :     if (shouldFree)
    2594             294 :         heap_free_minimal_tuple(tuple);
    2595             294 : }
    2596                 : 
    2597 ECB             : /*
    2598 EUB             :  *      ExecHashRemoveNextSkewBucket
    2599                 :  *
    2600                 :  *      Remove the least valuable skew bucket by pushing its tuples into
    2601 ECB             :  *      the main hash table.
    2602                 :  */
    2603                 : static void
    2604 CBC          51 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
    2605 ECB             : {
    2606                 :     int         bucketToRemove;
    2607                 :     HashSkewBucket *bucket;
    2608                 :     uint32      hashvalue;
    2609                 :     int         bucketno;
    2610                 :     int         batchno;
    2611                 :     HashJoinTuple hashTuple;
    2612                 : 
    2613                 :     /* Locate the bucket to remove */
    2614 CBC          51 :     bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
    2615 GIC          51 :     bucket = hashtable->skewBucket[bucketToRemove];
    2616                 : 
    2617 ECB             :     /*
    2618                 :      * Calculate which bucket and batch the tuples belong to in the main
    2619                 :      * hashtable.  They all have the same hash value, so it's the same for all
    2620                 :      * of them.  Also note that it's not possible for nbatch to increase while
    2621                 :      * we are processing the tuples.
    2622                 :      */
    2623 GIC          51 :     hashvalue = bucket->hashvalue;
    2624              51 :     ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
    2625                 : 
    2626                 :     /* Process all tuples in the bucket */
    2627              51 :     hashTuple = bucket->tuples;
    2628 CBC         225 :     while (hashTuple != NULL)
    2629                 :     {
    2630 GIC         174 :         HashJoinTuple nextHashTuple = hashTuple->next.unshared;
    2631                 :         MinimalTuple tuple;
    2632                 :         Size        tupleSize;
    2633                 : 
    2634                 :         /*
    2635                 :          * This code must agree with ExecHashTableInsert.  We do not use
    2636 ECB             :          * ExecHashTableInsert directly as ExecHashTableInsert expects a
    2637                 :          * TupleTableSlot while we already have HashJoinTuples.
    2638                 :          */
    2639 GIC         174 :         tuple = HJTUPLE_MINTUPLE(hashTuple);
    2640             174 :         tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
    2641                 : 
    2642 ECB             :         /* Decide whether to put the tuple in the hash table or a temp file */
    2643 GIC         174 :         if (batchno == hashtable->curbatch)
    2644                 :         {
    2645                 :             /* Move the tuple to the main hash table */
    2646                 :             HashJoinTuple copyTuple;
    2647                 : 
    2648                 :             /*
    2649 ECB             :              * We must copy the tuple into the dense storage, else it will not
    2650                 :              * be found by, eg, ExecHashIncreaseNumBatches.
    2651                 :              */
    2652 GIC          69 :             copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
    2653              69 :             memcpy(copyTuple, hashTuple, tupleSize);
    2654              69 :             pfree(hashTuple);
    2655                 : 
    2656 CBC          69 :             copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
    2657              69 :             hashtable->buckets.unshared[bucketno] = copyTuple;
    2658                 : 
    2659                 :             /* We have reduced skew space, but overall space doesn't change */
    2660 GIC          69 :             hashtable->spaceUsedSkew -= tupleSize;
    2661                 :         }
    2662 ECB             :         else
    2663                 :         {
    2664                 :             /* Put the tuple into a temp file for later batches */
    2665 GIC         105 :             Assert(batchno > hashtable->curbatch);
    2666             105 :             ExecHashJoinSaveTuple(tuple, hashvalue,
    2667             105 :                                   &hashtable->innerBatchFile[batchno]);
    2668             105 :             pfree(hashTuple);
    2669             105 :             hashtable->spaceUsed -= tupleSize;
    2670             105 :             hashtable->spaceUsedSkew -= tupleSize;
    2671                 :         }
    2672                 : 
    2673             174 :         hashTuple = nextHashTuple;
    2674 ECB             : 
    2675                 :         /* allow this loop to be cancellable */
    2676 GIC         174 :         CHECK_FOR_INTERRUPTS();
    2677                 :     }
    2678                 : 
    2679                 :     /*
    2680 ECB             :      * Free the bucket struct itself and reset the hashtable entry to NULL.
    2681                 :      *
    2682                 :      * NOTE: this is not nearly as simple as it looks on the surface, because
    2683                 :      * of the possibility of collisions in the hashtable.  Suppose that hash
    2684                 :      * values A and B collide at a particular hashtable entry, and that A was
    2685                 :      * entered first so B gets shifted to a different table entry.  If we were
    2686                 :      * to remove A first then ExecHashGetSkewBucket would mistakenly start
    2687                 :      * reporting that B is not in the hashtable, because it would hit the NULL
    2688                 :      * before finding B.  However, we always remove entries in the reverse
    2689                 :      * order of creation, so this failure cannot happen.
    2690                 :      */
    2691 GIC          51 :     hashtable->skewBucket[bucketToRemove] = NULL;
    2692              51 :     hashtable->nSkewBuckets--;
    2693 CBC          51 :     pfree(bucket);
    2694              51 :     hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
    2695              51 :     hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
    2696                 : 
    2697                 :     /*
    2698 ECB             :      * If we have removed all skew buckets then give up on skew optimization.
    2699                 :      * Release the arrays since they aren't useful any more.
    2700                 :      */
    2701 CBC          51 :     if (hashtable->nSkewBuckets == 0)
    2702 ECB             :     {
    2703 LBC           0 :         hashtable->skewEnabled = false;
    2704 UIC           0 :         pfree(hashtable->skewBucket);
    2705               0 :         pfree(hashtable->skewBucketNums);
    2706 LBC           0 :         hashtable->skewBucket = NULL;
    2707 UBC           0 :         hashtable->skewBucketNums = NULL;
    2708 UIC           0 :         hashtable->spaceUsed -= hashtable->spaceUsedSkew;
    2709 LBC           0 :         hashtable->spaceUsedSkew = 0;
    2710 ECB             :     }
    2711 CBC          51 : }
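
/*
 * Editor's note: the collision hazard in the NOTE above is easiest to see
 * with a toy open-addressing table.  A minimal, self-contained sketch
 * (illustrative only, not PostgreSQL code) of why removing the
 * earlier-created entry first would make its collision victims invisible:
 */
#include <stdio.h>

#define NSLOTS 4
static const char *slots[NSLOTS];   /* NULL means "empty, stop probing" */

static int
probe(unsigned hash, const char *key)
{
    for (unsigned i = hash % NSLOTS;; i = (i + 1) % NSLOTS)
    {
        if (slots[i] == NULL)
            return -1;              /* hit the NULL before finding key */
        if (slots[i] == key)
            return (int) i;
    }
}

int
main(void)
{
    const char *A = "A", *B = "B";

    slots[1] = A;                   /* A hashes to slot 1 */
    slots[2] = B;                   /* B also hashes to 1, shifted to 2 */

    slots[1] = NULL;                /* wrong order: A removed before B... */
    printf("%d\n", probe(1, B));    /* ...so B now reports -1 (not found) */
    return 0;
}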
    2712                 : 
    2713                 : /*
    2714                 :  * Reserve space in the DSM segment for instrumentation data.
    2715                 :  */
    2716                 : void
    2717 GIC          93 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
    2718                 : {
    2719                 :     size_t      size;
    2720 ECB             : 
    2721                 :     /* don't need this if not instrumenting or no workers */
    2722 GIC          93 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2723              51 :         return;
    2724                 : 
    2725              42 :     size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
    2726              42 :     size = add_size(size, offsetof(SharedHashInfo, hinstrument));
    2727              42 :     shm_toc_estimate_chunk(&pcxt->estimator, size);
    2728              42 :     shm_toc_estimate_keys(&pcxt->estimator, 1);
    2729                 : }
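
/*
 * Editor's note: mul_size()/add_size() are PostgreSQL's overflow-checked
 * Size arithmetic, and the offsetof() term accounts for the fixed header
 * of a struct that ends in a flexible array member (one slot per worker).
 * A hedged sketch of the same sizing idiom in plain C, with hypothetical
 * types standing in for SharedHashInfo/HashInstrumentation:
 */
#include <stddef.h>
#include <stdlib.h>

typedef struct WorkerStats
{
    long        tuples;
} WorkerStats;

typedef struct SharedStats
{
    int         num_workers;
    WorkerStats hinstrument[];      /* flexible array member */
} SharedStats;

static SharedStats *
alloc_shared_stats(int nworkers)
{
    /* header counted once, then one element per worker */
    size_t      size = offsetof(SharedStats, hinstrument)
        + (size_t) nworkers * sizeof(WorkerStats);

    /* zero the per-worker areas, as ExecHashInitializeDSM does */
    return (SharedStats *) calloc(1, size);
}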
    2730 ECB             : 
    2731                 : /*
    2732                 :  * Set up a space in the DSM for all workers to record instrumentation data
    2733                 :  * about their hash table.
    2734                 :  */
    2735                 : void
    2736 GIC          93 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
    2737                 : {
    2738                 :     size_t      size;
    2739 ECB             : 
    2740                 :     /* don't need this if not instrumenting or no workers */
    2741 GIC          93 :     if (!node->ps.instrument || pcxt->nworkers == 0)
    2742              51 :         return;
    2743 ECB             : 
    2744 CBC          42 :     size = offsetof(SharedHashInfo, hinstrument) +
    2745 GIC          42 :         pcxt->nworkers * sizeof(HashInstrumentation);
    2746 CBC          42 :     node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
    2747                 : 
    2748                 :     /* Each per-worker area must start out as zeroes. */
    2749 GIC          42 :     memset(node->shared_info, 0, size);
    2750                 : 
    2751              42 :     node->shared_info->num_workers = pcxt->nworkers;
    2752              42 :     shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
    2753              42 :                    node->shared_info);
    2754                 : }
    2755 ECB             : 
    2756                 : /*
    2757                 :  * Locate the DSM space for hash table instrumentation data that we'll write
    2758                 :  * to at shutdown time.
    2759                 :  */
    2760                 : void
    2761 GIC         267 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
    2762                 : {
    2763                 :     SharedHashInfo *shared_info;
    2764                 : 
    2765                 :     /* don't need this if not instrumenting */
    2766             267 :     if (!node->ps.instrument)
    2767             141 :         return;
    2768 ECB             : 
    2769                 :     /*
    2770                 :      * Find our entry in the shared area, and set up a pointer to it so that
    2771                 :      * we'll accumulate stats there when shutting down or rebuilding the hash
    2772                 :      * table.
    2773                 :      */
    2774                 :     shared_info = (SharedHashInfo *)
    2775 GIC         126 :         shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
    2776 CBC         126 :     node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
    2777                 : }
    2778                 : 
    2779                 : /*
    2780                 :  * Collect EXPLAIN stats if needed, saving them into DSM memory if
    2781 ECB             :  * ExecHashInitializeWorker was called, or local storage if not.  In the
    2782                 :  * parallel case, this must be done in ExecShutdownHash() rather than
    2783                 :  * ExecEndHash() because the latter runs after we've detached from the DSM
    2784                 :  * segment.
    2785                 :  */
    2786                 : void
    2787 GIC       12754 : ExecShutdownHash(HashState *node)
    2788                 : {
    2789 ECB             :     /* Allocate save space if EXPLAIN'ing and we didn't do so already */
    2790 GIC       12754 :     if (node->ps.instrument && !node->hinstrument)
    2791 GNC          54 :         node->hinstrument = palloc0_object(HashInstrumentation);
    2792                 :     /* Now accumulate data for the current (final) hash table */
    2793 GIC       12754 :     if (node->hinstrument && node->hashtable)
    2794             164 :         ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
    2795           12754 : }
    2796                 : 
    2797                 : /*
    2798                 :  * Retrieve instrumentation data from workers before the DSM segment is
    2799                 :  * detached, so that EXPLAIN can access it.
    2800                 :  */
    2801                 : void
    2802              42 : ExecHashRetrieveInstrumentation(HashState *node)
    2803                 : {
    2804              42 :     SharedHashInfo *shared_info = node->shared_info;
    2805                 :     size_t      size;
    2806 ECB             : 
    2807 CBC          42 :     if (shared_info == NULL)
    2808 LBC           0 :         return;
    2809 ECB             : 
    2810                 :     /* Replace node->shared_info with a copy in backend-local memory. */
    2811 GIC          42 :     size = offsetof(SharedHashInfo, hinstrument) +
    2812              42 :         shared_info->num_workers * sizeof(HashInstrumentation);
    2813              42 :     node->shared_info = palloc(size);
    2814              42 :     memcpy(node->shared_info, shared_info, size);
    2815                 : }
    2816 ECB             : 
    2817                 : /*
    2818 EUB             :  * Accumulate instrumentation data from 'hashtable' into an
    2819                 :  * initially-zeroed HashInstrumentation struct.
    2820                 :  *
    2821                 :  * This is used to merge information across successive hash table instances
    2822                 :  * within a single plan node.  We take the maximum values of each interesting
    2823                 :  * number.  The largest nbuckets and largest nbatch values might have occurred
    2824                 :  * in different instances, so there's some risk of confusion from reporting
    2825                 :  * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
    2826 ECB             :  * issue if we don't report the largest values.  Similarly, we want to report
    2827                 :  * the largest spacePeak regardless of whether it happened in the same
    2828                 :  * instance as the largest nbuckets or nbatch.  All the instances should have
    2829                 :  * the same nbuckets_original and nbatch_original; but there's little value
    2830                 :  * in depending on that here, so handle them the same way.
    2831                 :  */
    2832                 : void
    2833 GIC         164 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
    2834                 :                              HashJoinTable hashtable)
    2835                 : {
    2836             164 :     instrument->nbuckets = Max(instrument->nbuckets,
    2837 ECB             :                                hashtable->nbuckets);
    2838 CBC         164 :     instrument->nbuckets_original = Max(instrument->nbuckets_original,
    2839                 :                                         hashtable->nbuckets_original);
    2840             164 :     instrument->nbatch = Max(instrument->nbatch,
    2841 ECB             :                              hashtable->nbatch);
    2842 CBC         164 :     instrument->nbatch_original = Max(instrument->nbatch_original,
    2843 ECB             :                                       hashtable->nbatch_original);
    2844 GIC         164 :     instrument->space_peak = Max(instrument->space_peak,
    2845                 :                                  hashtable->spacePeak);
    2846             164 : }
    2847                 : 
    2848                 : /*
    2849                 :  * Allocate 'size' bytes from the currently active HashMemoryChunk
    2850                 :  */
    2851 ECB             : static void *
    2852 GIC     5581934 : dense_alloc(HashJoinTable hashtable, Size size)
    2853                 : {
    2854                 :     HashMemoryChunk newChunk;
    2855                 :     char       *ptr;
    2856 ECB             : 
    2857                 :     /* just in case the size is not already aligned properly */
    2858 GIC     5581934 :     size = MAXALIGN(size);
    2859 ECB             : 
    2860                 :     /*
    2861                 :      * If tuple size is larger than threshold, allocate a separate chunk.
    2862                 :      */
    2863 GIC     5581934 :     if (size > HASH_CHUNK_THRESHOLD)
    2864 ECB             :     {
    2865                 :         /* allocate new chunk and put it at the beginning of the list */
    2866 LBC           0 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2867 ECB             :                                                         HASH_CHUNK_HEADER_SIZE + size);
    2868 LBC           0 :         newChunk->maxlen = size;
    2869 UIC           0 :         newChunk->used = size;
    2870               0 :         newChunk->ntuples = 1;
    2871                 : 
    2872                 :         /*
    2873                 :          * Add this chunk to the list after the first existing chunk, so that
    2874                 :          * we don't lose the remaining space in the "current" chunk.
    2875                 :          */
    2876 LBC           0 :         if (hashtable->chunks != NULL)
    2877                 :         {
    2878 UIC           0 :             newChunk->next = hashtable->chunks->next;
    2879               0 :             hashtable->chunks->next.unshared = newChunk;
    2880                 :         }
    2881 ECB             :         else
    2882                 :         {
    2883 UIC           0 :             newChunk->next.unshared = hashtable->chunks;
    2884               0 :             hashtable->chunks = newChunk;
    2885                 :         }
    2886                 : 
    2887               0 :         return HASH_CHUNK_DATA(newChunk);
    2888                 :     }
    2889                 : 
    2890 ECB             :     /*
    2891                 :      * See if we have enough space for it in the current chunk (if any). If
    2892                 :      * not, allocate a fresh chunk.
    2893                 :      */
    2894 GIC     5581934 :     if ((hashtable->chunks == NULL) ||
    2895         5572045 :         (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
    2896                 :     {
    2897                 :         /* allocate new chunk and put it at the beginning of the list */
    2898           17032 :         newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
    2899                 :                                                         HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
    2900                 : 
    2901           17032 :         newChunk->maxlen = HASH_CHUNK_SIZE;
    2902 CBC       17032 :         newChunk->used = size;
    2903 GIC       17032 :         newChunk->ntuples = 1;
    2904                 : 
    2905 CBC       17032 :         newChunk->next.unshared = hashtable->chunks;
    2906           17032 :         hashtable->chunks = newChunk;
    2907                 : 
    2908           17032 :         return HASH_CHUNK_DATA(newChunk);
    2909 ECB             :     }
    2910                 : 
    2911                 :     /* There is enough space in the current chunk, let's add the tuple */
    2912 GIC     5564902 :     ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
    2913         5564902 :     hashtable->chunks->used += size;
    2914         5564902 :     hashtable->chunks->ntuples += 1;
    2915                 : 
    2916                 :     /* return pointer to the start of the tuple memory */
    2917 CBC     5564902 :     return ptr;
    2918                 : }
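
/*
 * Editor's note: dense_alloc() is a classic bump allocator.  A minimal
 * stand-alone sketch of the normal path, with an assumed constant in
 * place of HASH_CHUNK_SIZE, a crude stand-in for MAXALIGN, and the
 * oversized-tuple branch omitted (so callers must keep size <= CHUNK_SIZE):
 */
#include <stdlib.h>

#define CHUNK_SIZE 32768            /* assumption, not the real constant */

typedef struct Chunk
{
    struct Chunk *next;
    size_t      used;
    char        data[CHUNK_SIZE];
} Chunk;

static void *
bump_alloc(Chunk **chunks, size_t size)
{
    size = (size + 7) & ~(size_t) 7;    /* crude stand-in for MAXALIGN */

    if (*chunks == NULL || CHUNK_SIZE - (*chunks)->used < size)
    {
        /* allocate a fresh chunk and push it on the list */
        Chunk      *newChunk = malloc(sizeof(Chunk));

        if (newChunk == NULL)
            return NULL;
        newChunk->used = 0;
        newChunk->next = *chunks;
        *chunks = newChunk;
    }

    /* carve the next 'size' bytes off the current chunk */
    void       *ptr = (*chunks)->data + (*chunks)->used;

    (*chunks)->used += size;
    return ptr;
}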
    2919 ECB             : 
    2920                 : /*
    2921                 :  * Allocate space for a tuple in shared dense storage.  This is equivalent to
    2922                 :  * dense_alloc but for Parallel Hash using shared memory.
    2923 EUB             :  *
    2924                 :  * While loading a tuple into shared memory, we might run out of memory and
    2925                 :  * decide to repartition, or determine that the load factor is too high and
    2926 ECB             :  * decide to expand the bucket array, or discover that another participant has
    2927                 :  * commanded us to help do that.  Return NULL if the number of buckets or
    2928                 :  * batches has changed, indicating that the caller must retry (considering the
    2929                 :  * possibility that the tuple no longer belongs in the same batch).
    2930                 :  */
    2931                 : static HashJoinTuple
    2932 GIC     1191074 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
    2933                 :                            dsa_pointer *shared)
    2934                 : {
    2935         1191074 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    2936                 :     dsa_pointer chunk_shared;
    2937                 :     HashMemoryChunk chunk;
    2938                 :     Size        chunk_size;
    2939                 :     HashJoinTuple result;
    2940         1191074 :     int         curbatch = hashtable->curbatch;
    2941                 : 
    2942         1191074 :     size = MAXALIGN(size);
    2943                 : 
    2944                 :     /*
    2945                 :      * Fast path: if there is enough space in this backend's current chunk,
    2946                 :      * then we can allocate without any locking.
    2947                 :      */
    2948 CBC     1191074 :     chunk = hashtable->current_chunk;
    2949 GIC     1191074 :     if (chunk != NULL &&
    2950         1190527 :         size <= HASH_CHUNK_THRESHOLD &&
    2951 CBC     1190527 :         chunk->maxlen - chunk->used >= size)
    2952                 :     {
    2953 ECB             : 
    2954 GIC     1189185 :         chunk_shared = hashtable->current_chunk_shared;
    2955 CBC     1189185 :         Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
    2956 GIC     1189185 :         *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
    2957 CBC     1189185 :         result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
    2958 GIC     1189185 :         chunk->used += size;
    2959 ECB             : 
    2960 GIC     1189185 :         Assert(chunk->used <= chunk->maxlen);
    2961 CBC     1189185 :         Assert(result == dsa_get_address(hashtable->area, *shared));
    2962                 : 
    2963 GIC     1189185 :         return result;
    2964                 :     }
    2965                 : 
    2966                 :     /* Slow path: try to allocate a new chunk. */
    2967 CBC        1889 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    2968                 : 
    2969                 :     /*
    2970                 :      * Check if we need to help increase the number of buckets or batches.
    2971                 :      */
    2972 GIC        1889 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    2973 CBC        1870 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2974                 :     {
    2975 GIC          91 :         ParallelHashGrowth growth = pstate->growth;
    2976                 : 
    2977              91 :         hashtable->current_chunk = NULL;
    2978 CBC          91 :         LWLockRelease(&pstate->lock);
    2979                 : 
    2980                 :         /* Another participant has commanded us to help grow. */
    2981 GBC          91 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    2982 GIC          19 :             ExecParallelHashIncreaseNumBatches(hashtable);
    2983 GBC          72 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    2984              72 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    2985 EUB             : 
    2986                 :         /* The caller must retry. */
    2987 GIC          91 :         return NULL;
    2988                 :     }
    2989                 : 
    2990                 :     /* Oversized tuples get their own chunk. */
    2991 GBC        1798 :     if (size > HASH_CHUNK_THRESHOLD)
    2992 GIC          24 :         chunk_size = size + HASH_CHUNK_HEADER_SIZE;
    2993 EUB             :     else
    2994 GBC        1774 :         chunk_size = HASH_CHUNK_SIZE;
    2995                 : 
    2996                 :     /* Check if it's time to grow batches or buckets. */
    2997 GIC        1798 :     if (pstate->growth != PHJ_GROWTH_DISABLED)
    2998 EUB             :     {
    2999 GBC         922 :         Assert(curbatch == 0);
    3000 GNC         922 :         Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
    3001                 : 
    3002 EUB             :         /*
    3003                 :          * Check if our space limit would be exceeded.  To avoid choking on
    3004                 :          * very large tuples or very low hash_mem setting, we'll always allow
    3005                 :          * each backend to allocate at least one chunk.
    3006                 :          */
    3007 GIC         922 :         if (hashtable->batches[0].at_least_one_chunk &&
    3008             709 :             hashtable->batches[0].shared->size +
    3009 CBC         709 :             chunk_size > pstate->space_allowed)
    3010 ECB             :         {
    3011 GIC          19 :             pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3012              19 :             hashtable->batches[0].shared->space_exhausted = true;
    3013 CBC          19 :             LWLockRelease(&pstate->lock);
    3014                 : 
    3015 GIC          19 :             return NULL;
    3016 ECB             :         }
    3017                 : 
    3018                 :         /* Check if our load factor limit would be exceeded. */
    3019 GIC         903 :         if (hashtable->nbatch == 1)
    3020 ECB             :         {
    3021 CBC         792 :             hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
    3022 GIC         792 :             hashtable->batches[0].ntuples = 0;
    3023 ECB             :             /* Guard against integer overflow and alloc size overflow */
    3024 GIC         792 :             if (hashtable->batches[0].shared->ntuples + 1 >
    3025             792 :                 hashtable->nbuckets * NTUP_PER_BUCKET &&
    3026              54 :                 hashtable->nbuckets < (INT_MAX / 2) &&
    3027 CBC          54 :                 hashtable->nbuckets * 2 <=
    3028 ECB             :                 MaxAllocSize / sizeof(dsa_pointer_atomic))
    3029                 :             {
    3030 GIC          54 :                 pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
    3031              54 :                 LWLockRelease(&pstate->lock);
    3032 ECB             : 
    3033 GIC          54 :                 return NULL;
    3034                 :             }
    3035                 :         }
    3036                 :     }
    3037                 : 
    3038                 :     /* We are cleared to allocate a new chunk. */
    3039            1725 :     chunk_shared = dsa_allocate(hashtable->area, chunk_size);
    3040            1725 :     hashtable->batches[curbatch].shared->size += chunk_size;
    3041            1725 :     hashtable->batches[curbatch].at_least_one_chunk = true;
    3042                 : 
    3043                 :     /* Set up the chunk. */
    3044            1725 :     chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
    3045            1725 :     *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
    3046            1725 :     chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
    3047 CBC        1725 :     chunk->used = size;
    3048                 : 
    3049                 :     /*
    3050 ECB             :      * Push it onto the list of chunks, so that it can be found if we need to
    3051                 :      * increase the number of buckets or batches (batch 0 only) and later for
    3052                 :      * freeing the memory (all batches).
    3053                 :      */
    3054 GIC        1725 :     chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
    3055 CBC        1725 :     hashtable->batches[curbatch].shared->chunks = chunk_shared;
    3056                 : 
    3057            1725 :     if (size <= HASH_CHUNK_THRESHOLD)
    3058                 :     {
    3059                 :         /*
    3060                 :          * Make this the current chunk so that we can use the fast path to
    3061                 :          * fill the rest of it up in future calls.
    3062                 :          */
    3063            1707 :         hashtable->current_chunk = chunk;
    3064            1707 :         hashtable->current_chunk_shared = chunk_shared;
    3065 ECB             :     }
    3066 CBC        1725 :     LWLockRelease(&pstate->lock);
    3067                 : 
    3068 GIC        1725 :     Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
    3069 CBC        1725 :     result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
    3070 ECB             : 
    3071 CBC        1725 :     return result;
    3072 ECB             : }
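
/*
 * Editor's note: the NULL-return contract above implies a retry loop in
 * the caller.  A hedged sketch of its shape (the real loop lives in
 * ExecParallelHashTableInsert; error handling and the batch-0-vs-later
 * distinction are omitted here):
 *
 *     for (;;)
 *     {
 *         int         bucketno;
 *         int         batchno;
 *         dsa_pointer shared;
 *         HashJoinTuple tuple;
 *
 *         ExecHashGetBucketAndBatch(hashtable, hashvalue,
 *                                   &bucketno, &batchno);
 *         tuple = ExecParallelHashTupleAlloc(hashtable, size, &shared);
 *         if (tuple == NULL)
 *             continue;           (we helped grow; recompute and retry)
 *         break;                  (space reserved; copy the tuple in)
 *     }
 */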
    3073                 : 
    3074                 : /*
    3075                 :  * One backend needs to set up the shared batch state including tuplestores.
    3076                 :  * Other backends will ensure they have correctly configured accessors by
    3077                 :  * calling ExecParallelHashEnsureBatchAccessors().
    3078                 :  */
    3079                 : static void
    3080 GIC         109 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
    3081                 : {
    3082 CBC         109 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3083                 :     ParallelHashJoinBatch *batches;
    3084                 :     MemoryContext oldcxt;
    3085                 :     int         i;
    3086                 : 
    3087             109 :     Assert(hashtable->batches == NULL);
    3088 ECB             : 
    3089                 :     /* Allocate space. */
    3090 CBC         109 :     pstate->batches =
    3091 GIC         109 :         dsa_allocate0(hashtable->area,
    3092 ECB             :                       EstimateParallelHashJoinBatch(hashtable) * nbatch);
    3093 CBC         109 :     pstate->nbatch = nbatch;
    3094 GIC         109 :     batches = dsa_get_address(hashtable->area, pstate->batches);
    3095                 : 
    3096 ECB             :     /* Use hash join memory context. */
    3097 CBC         109 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
    3098 ECB             : 
    3099                 :     /* Allocate this backend's accessor array. */
    3100 GIC         109 :     hashtable->nbatch = nbatch;
    3101 GNC         109 :     hashtable->batches =
    3102             109 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3103                 : 
    3104                 :     /* Set up the shared state, tuplestores and backend-local accessors. */
    3105 GIC         595 :     for (i = 0; i < hashtable->nbatch; ++i)
    3106 ECB             :     {
    3107 CBC         486 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3108 GIC         486 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3109 ECB             :         char        name[MAXPGPATH];
    3110                 : 
    3111                 :         /*
    3112                 :          * All members of shared were zero-initialized.  We just need to set
    3113                 :          * up the Barrier.
    3114                 :          */
    3115 CBC         486 :         BarrierInit(&shared->batch_barrier, 0);
    3116 GIC         486 :         if (i == 0)
    3117                 :         {
    3118                 :             /* Batch 0 doesn't need to be loaded. */
    3119             109 :             BarrierAttach(&shared->batch_barrier);
    3120 GNC         436 :             while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
    3121 GIC         327 :                 BarrierArriveAndWait(&shared->batch_barrier, 0);
    3122 CBC         109 :             BarrierDetach(&shared->batch_barrier);
    3123 ECB             :         }
    3124                 : 
    3125                 :         /* Initialize accessor state.  All members were zero-initialized. */
    3126 CBC         486 :         accessor->shared = shared;
    3127 ECB             : 
    3128                 :         /* Initialize the shared tuplestores. */
    3129 GIC         486 :         snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
    3130 CBC         486 :         accessor->inner_tuples =
    3131 GIC         486 :             sts_initialize(ParallelHashJoinBatchInner(shared),
    3132                 :                            pstate->nparticipants,
    3133                 :                            ParallelWorkerNumber + 1,
    3134 ECB             :                            sizeof(uint32),
    3135                 :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3136                 :                            &pstate->fileset,
    3137                 :                            name);
    3138 GIC         486 :         snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
    3139 CBC         486 :         accessor->outer_tuples =
    3140             486 :             sts_initialize(ParallelHashJoinBatchOuter(shared,
    3141 ECB             :                                                       pstate->nparticipants),
    3142                 :                            pstate->nparticipants,
    3143                 :                            ParallelWorkerNumber + 1,
    3144                 :                            sizeof(uint32),
    3145                 :                            SHARED_TUPLESTORE_SINGLE_PASS,
    3146                 :                            &pstate->fileset,
    3147                 :                            name);
    3148                 :     }
    3149                 : 
    3150 GIC         109 :     MemoryContextSwitchTo(oldcxt);
    3151             109 : }
    3152                 : 
    3153                 : /*
    3154 ECB             :  * Free the current set of ParallelHashJoinBatchAccessor objects.
    3155                 :  */
    3156                 : static void
    3157 GIC          34 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
    3158                 : {
    3159 ECB             :     int         i;
    3160                 : 
    3161 CBC         156 :     for (i = 0; i < hashtable->nbatch; ++i)
    3162 ECB             :     {
    3163                 :         /* Make sure no files are left open. */
    3164 GIC         122 :         sts_end_write(hashtable->batches[i].inner_tuples);
    3165             122 :         sts_end_write(hashtable->batches[i].outer_tuples);
    3166             122 :         sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3167             122 :         sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3168                 :     }
    3169 CBC          34 :     pfree(hashtable->batches);
    3170              34 :     hashtable->batches = NULL;
    3171 GIC          34 : }
    3172 ECB             : 
    3173                 : /*
    3174                 :  * Make sure this backend has up-to-date accessors for the current set of
    3175                 :  * batches.
    3176                 :  */
    3177                 : static void
    3178 CBC         455 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
    3179 ECB             : {
    3180 GIC         455 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3181 ECB             :     ParallelHashJoinBatch *batches;
    3182                 :     MemoryContext oldcxt;
    3183                 :     int         i;
    3184                 : 
    3185 GIC         455 :     if (hashtable->batches != NULL)
    3186 ECB             :     {
    3187 GIC         346 :         if (hashtable->nbatch == pstate->nbatch)
    3188             341 :             return;
    3189               5 :         ExecParallelHashCloseBatchAccessors(hashtable);
    3190                 :     }
    3191                 : 
    3192                 :     /*
    3193                 :      * We should never see a state where the batch-tracking array is freed,
    3194                 :      * because we should have given up sooner if we join when the build
    3195                 :      * barrier has reached the PHJ_BUILD_FREE phase.
    3196                 :      */
    3197 CBC         114 :     Assert(DsaPointerIsValid(pstate->batches));
    3198                 : 
    3199                 :     /* Use hash join memory context. */
    3200 GIC         114 :     oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
    3201                 : 
    3202 ECB             :     /* Allocate this backend's accessor array. */
    3203 GIC         114 :     hashtable->nbatch = pstate->nbatch;
    3204 GNC         114 :     hashtable->batches =
    3205             114 :         palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
    3206 ECB             : 
    3207                 :     /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
    3208                 :     batches = (ParallelHashJoinBatch *)
    3209 CBC         114 :         dsa_get_address(hashtable->area, pstate->batches);
    3210                 : 
    3211                 :     /* Set up the accessor array and attach to the tuplestores. */
    3212             698 :     for (i = 0; i < hashtable->nbatch; ++i)
    3213                 :     {
    3214 GIC         584 :         ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
    3215 CBC         584 :         ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
    3216 ECB             : 
    3217 CBC         584 :         accessor->shared = shared;
    3218 GIC         584 :         accessor->preallocated = 0;
    3219             584 :         accessor->done = false;
    3220 GNC         584 :         accessor->outer_eof = false;
    3221 CBC         584 :         accessor->inner_tuples =
    3222 GIC         584 :             sts_attach(ParallelHashJoinBatchInner(shared),
    3223 ECB             :                        ParallelWorkerNumber + 1,
    3224                 :                        &pstate->fileset);
    3225 GIC         584 :         accessor->outer_tuples =
    3226             584 :             sts_attach(ParallelHashJoinBatchOuter(shared,
    3227                 :                                                   pstate->nparticipants),
    3228                 :                        ParallelWorkerNumber + 1,
    3229                 :                        &pstate->fileset);
    3230                 :     }
    3231 ECB             : 
    3232 CBC         114 :     MemoryContextSwitchTo(oldcxt);
    3233                 : }
    3234                 : 
    3235 ECB             : /*
    3236                 :  * Allocate an empty shared memory hash table for a given batch.
    3237                 :  */
    3238                 : void
    3239 GIC         408 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
    3240                 : {
    3241             408 :     ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
    3242 ECB             :     dsa_pointer_atomic *buckets;
    3243 GIC         408 :     int         nbuckets = hashtable->parallel_state->nbuckets;
    3244                 :     int         i;
    3245 ECB             : 
    3246 CBC         408 :     batch->buckets =
    3247             408 :         dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
    3248                 :     buckets = (dsa_pointer_atomic *)
    3249 GIC         408 :         dsa_get_address(hashtable->area, batch->buckets);
    3250         1611160 :     for (i = 0; i < nbuckets; ++i)
    3251         1610752 :         dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
    3252             408 : }
    3253                 : 
    3254 ECB             : /*
    3255                 :  * If we are currently attached to a shared hash join batch, detach.  If we
    3256                 :  * are last to detach, clean up.
    3257                 :  */
    3258                 : void
    3259 GIC        9912 : ExecHashTableDetachBatch(HashJoinTable hashtable)
    3260                 : {
    3261            9912 :     if (hashtable->parallel_state != NULL &&
    3262             677 :         hashtable->curbatch >= 0)
    3263                 :     {
    3264             488 :         int         curbatch = hashtable->curbatch;
    3265             488 :         ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
    3266 GNC         488 :         bool        attached = true;
    3267 ECB             : 
    3268                 :         /* Make sure any temporary files are closed. */
    3269 GIC         488 :         sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
    3270             488 :         sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
    3271                 : 
    3272                 :         /* After attaching we always get at least to PHJ_BATCH_PROBE. */
    3273 GNC         488 :         Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
    3274                 :                BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
    3275                 : 
    3276                 :         /*
    3277                 :          * If we're abandoning the PHJ_BATCH_PROBE phase early without having
    3278                 :          * reached the end of it, it means the plan doesn't want any more
    3279                 :          * tuples, and it is happy to abandon any tuples buffered in this
    3280                 :          * process's subplans.  For correctness, we can't allow any process to
    3281                 :          * execute the PHJ_BATCH_SCAN phase, because we will never have the
    3282                 :          * complete set of match bits.  Therefore we skip emitting unmatched
    3283                 :          * tuples in all backends (if this is a full/right join), as if those
    3284                 :          * tuples were all due to be emitted by this process and it has
    3285                 :          * abandoned them too.
    3286                 :          */
    3287             488 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
    3288             456 :             !hashtable->batches[curbatch].outer_eof)
    3289 ECB             :         {
    3290                 :             /*
    3291                 :              * This flag may be written to by multiple backends during
    3292                 :              * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
    3293                 :              * phase so requires no extra locking.
    3294                 :              */
    3295 UNC           0 :             batch->skip_unmatched = true;
    3296                 :         }
    3297                 : 
    3298                 :         /*
    3299                 :          * Even if we aren't doing a full/right outer join, we'll step through
    3300                 :          * the PHJ_BATCH_SCAN phase just to maintain the invariant that
    3301                 :          * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
    3302                 :          */
    3303 GNC         488 :         if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
    3304             456 :             attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
    3305             488 :         if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
    3306                 :         {
    3307                 :             /*
    3308                 :              * We are no longer attached to the batch barrier, but we're the
    3309                 :              * process that was chosen to free resources and it's safe to
    3310                 :              * assert the current phase.  The ParallelHashJoinBatch can't go
    3311                 :              * away underneath us while we are attached to the build barrier,
    3312                 :              * making this access safe.
    3313                 :              */
    3314             408 :             Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
    3315 ECB             : 
    3316                 :             /* Free shared chunks and buckets. */
    3317 CBC        1985 :             while (DsaPointerIsValid(batch->chunks))
    3318 ECB             :             {
    3319                 :                 HashMemoryChunk chunk =
    3320 CBC        1577 :                 dsa_get_address(hashtable->area, batch->chunks);
    3321            1577 :                 dsa_pointer next = chunk->next.shared;
    3322 ECB             : 
    3323 GIC        1577 :                 dsa_free(hashtable->area, batch->chunks);
    3324            1577 :                 batch->chunks = next;
    3325                 :             }
    3326             408 :             if (DsaPointerIsValid(batch->buckets))
    3327                 :             {
    3328             408 :                 dsa_free(hashtable->area, batch->buckets);
    3329 CBC         408 :                 batch->buckets = InvalidDsaPointer;
    3330                 :             }
    3331 ECB             :         }
    3332                 : 
    3333                 :         /*
    3334                 :          * Track the largest batch we've been attached to.  Though each
    3335                 :          * backend might see a different subset of batches, explain.c will
    3336                 :          * scan the results from all backends to find the largest value.
    3337                 :          */
    3338 CBC         488 :         hashtable->spacePeak =
    3339             488 :             Max(hashtable->spacePeak,
    3340 ECB             :                 batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
    3341                 : 
    3342                 :         /* Remember that we are not attached to a batch. */
    3343 GIC         488 :         hashtable->curbatch = -1;
    3344                 :     }
    3345            9912 : }
    3346                 : 
    3347                 : /*
    3348 ECB             :  * Detach from all shared resources.  If we are last to detach, clean up.
    3349                 :  */
    3350                 : void
    3351 CBC        9424 : ExecHashTableDetach(HashJoinTable hashtable)
    3352                 : {
    3353 GIC        9424 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3354 ECB             : 
    3355                 :     /*
    3356                 :      * If we're involved in a parallel query, we must either have gotten all
    3357                 :      * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
    3358                 :      */
    3359 CBC        9424 :     Assert(!pstate ||
    3360                 :            BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
    3361                 : 
    3362 GNC        9424 :     if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
    3363                 :     {
    3364 ECB             :         int         i;
    3365                 : 
    3366                 :         /* Make sure any temporary files are closed. */
    3367 CBC         189 :         if (hashtable->batches)
    3368 ECB             :         {
    3369 CBC        1137 :             for (i = 0; i < hashtable->nbatch; ++i)
    3370 ECB             :             {
    3371 CBC         948 :                 sts_end_write(hashtable->batches[i].inner_tuples);
    3372             948 :                 sts_end_write(hashtable->batches[i].outer_tuples);
    3373 GIC         948 :                 sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
    3374             948 :                 sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
    3375 ECB             :             }
    3376                 :         }
    3377                 : 
    3378                 :         /* If we're last to detach, clean up shared memory. */
    3379 GIC         189 :         if (BarrierArriveAndDetach(&pstate->build_barrier))
    3380                 :         {
    3381                 :             /*
    3382 ECB             :              * Late joining processes will see this state and give up
    3383                 :              * immediately.
    3384                 :              */
    3385 GNC          81 :             Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
    3386                 : 
    3387 GIC          81 :             if (DsaPointerIsValid(pstate->batches))
    3388                 :             {
    3389 CBC          81 :                 dsa_free(hashtable->area, pstate->batches);
    3390 GIC          81 :                 pstate->batches = InvalidDsaPointer;
    3391 ECB             :             }
    3392                 :         }
    3393                 :     }
    3394 GIC        9424 :     hashtable->parallel_state = NULL;
    3395            9424 : }
    3396 ECB             : 
    3397                 : /*
    3398                 :  * Get the first tuple in a given bucket identified by number.
    3399                 :  */
    3400                 : static inline HashJoinTuple
    3401 CBC     1374924 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
    3402 ECB             : {
    3403                 :     HashJoinTuple tuple;
    3404                 :     dsa_pointer p;
    3405                 : 
    3406 GIC     1374924 :     Assert(hashtable->parallel_state);
    3407         1374924 :     p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
    3408         1374924 :     tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
    3409 ECB             : 
    3410 GIC     1374924 :     return tuple;
    3411 ECB             : }
    3412                 : 
    3413                 : /*
    3414                 :  * Get the next tuple in the same bucket as 'tuple'.
    3415                 :  */
    3416                 : static inline HashJoinTuple
    3417 GIC     1900052 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
    3418                 : {
    3419 ECB             :     HashJoinTuple next;
    3420                 : 
    3421 GIC     1900052 :     Assert(hashtable->parallel_state);
    3422         1900052 :     next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
    3423 ECB             : 
    3424 GIC     1900052 :     return next;
    3425                 : }
    3426                 : 
    3427                 : /*
    3428                 :  * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
    3429                 :  */
    3430                 : static inline void
    3431         1471484 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
    3432                 :                           HashJoinTuple tuple,
    3433                 :                           dsa_pointer tuple_shared)
    3434                 : {
    3435                 :     for (;;)
    3436                 :     {
    3437 CBC     1481942 :         tuple->next.shared = dsa_pointer_atomic_read(head);
    3438         1481942 :         if (dsa_pointer_atomic_compare_exchange(head,
    3439 GIC     1481942 :                                                 &tuple->next.shared,
    3440                 :                                                 tuple_shared))
    3441         1471484 :             break;
    3442                 :     }
    3443         1471484 : }
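
/*
 * Editor's note: this is the classic lock-free "Treiber stack" push;
 * dsa_pointer_atomic_compare_exchange is PostgreSQL's atomics wrapper.
 * The same loop, sketched with C11 <stdatomic.h> and ordinary pointers
 * standing in for dsa_pointer (illustrative only):
 */
#include <stdatomic.h>

typedef struct Node
{
    struct Node *next;
} Node;

static void
atomic_push(_Atomic(Node *) *head, Node *node)
{
    Node       *old = atomic_load(head);

    do
    {
        /* link to the head we last saw; a failed CAS refreshes 'old' */
        node->next = old;
    } while (!atomic_compare_exchange_weak(head, &old, node));
}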
    3444                 : 
    3445 EUB             : /*
    3446                 :  * Prepare to work on a given batch.
    3447                 :  */
    3448                 : void
    3449 GIC        1109 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
    3450                 : {
    3451            1109 :     Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
    3452                 : 
    3453 CBC        1109 :     hashtable->curbatch = batchno;
    3454            1109 :     hashtable->buckets.shared = (dsa_pointer_atomic *)
    3455            1109 :         dsa_get_address(hashtable->area,
    3456 GIC        1109 :                         hashtable->batches[batchno].shared->buckets);
    3457            1109 :     hashtable->nbuckets = hashtable->parallel_state->nbuckets;
    3458            1109 :     hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
    3459            1109 :     hashtable->current_chunk = NULL;
    3460            1109 :     hashtable->current_chunk_shared = InvalidDsaPointer;
    3461            1109 :     hashtable->batches[batchno].at_least_one_chunk = false;
    3462            1109 : }
    3463                 : 
    3464 ECB             : /*
    3465                 :  * Take the next available chunk from the queue of chunks being worked on in
    3466                 :  * parallel.  Return NULL if there are none left.  Otherwise return a pointer
    3467                 :  * to the chunk, and set *shared to the DSA pointer to the chunk.
    3468                 :  */
    3469                 : static HashMemoryChunk
    3470 CBC         593 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
    3471 ECB             : {
    3472 GIC         593 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3473 ECB             :     HashMemoryChunk chunk;
    3474                 : 
    3475 GIC         593 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3476 CBC         593 :     if (DsaPointerIsValid(pstate->chunk_work_queue))
    3477                 :     {
    3478             491 :         *shared = pstate->chunk_work_queue;
    3479 ECB             :         chunk = (HashMemoryChunk)
    3480 GIC         491 :             dsa_get_address(hashtable->area, *shared);
    3481             491 :         pstate->chunk_work_queue = chunk->next.shared;
    3482                 :     }
    3483                 :     else
    3484             102 :         chunk = NULL;
    3485             593 :     LWLockRelease(&pstate->lock);
    3486                 : 
    3487             593 :     return chunk;
    3488 ECB             : }
    3489                 : 
    3490                 : /*
    3491                 :  * Increase the space preallocated in this backend for a given inner batch by
    3492                 :  * at least a given amount.  This allows us to track whether a given batch
    3493                 :  * would fit in memory when loaded back in.  Also increase the number of
    3494                 :  * batches or buckets if required.
    3495                 :  *
    3496                 :  * This maintains a running estimation of how much space will be taken when we
    3497                 :  * load the batch back into memory by simulating the way chunks will be handed
    3498                 :  * out to workers.  It's not perfectly accurate because the tuples will be
    3499                 :  * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
    3500                 :  * it should be pretty close.  It tends to overestimate by a fraction of a
    3501                 :  * chunk per worker since all workers gang up to preallocate during hashing,
    3502                 :  * but workers tend to reload batches alone if there are enough to go around,
    3503                 :  * leaving fewer partially filled chunks.  This effect is bounded by
    3504                 :  * nparticipants.
    3505                 :  *
    3506                 :  * Return false if the number of batches or buckets has changed, and the
    3507                 :  * caller should reconsider which batch a given tuple now belongs in and call
    3508                 :  * again.
    3509                 :  */
    3510                 : static bool
    3511 GIC         865 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
    3512 ECB             : {
    3513 GIC         865 :     ParallelHashJoinState *pstate = hashtable->parallel_state;
    3514             865 :     ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
    3515             865 :     size_t      want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
    3516                 : 
    3517 CBC         865 :     Assert(batchno > 0);
    3518 GIC         865 :     Assert(batchno < hashtable->nbatch);
    3519 CBC         865 :     Assert(size == MAXALIGN(size));
    3520                 : 
    3521             865 :     LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
    3522 ECB             : 
    3523                 :     /* Has another participant commanded us to help grow? */
    3524 CBC         865 :     if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
    3525 GIC         855 :         pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3526                 :     {
    3527              10 :         ParallelHashGrowth growth = pstate->growth;
    3528                 : 
    3529 CBC          10 :         LWLockRelease(&pstate->lock);
    3530 GIC          10 :         if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
    3531              10 :             ExecParallelHashIncreaseNumBatches(hashtable);
    3532 UIC           0 :         else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
    3533               0 :             ExecParallelHashIncreaseNumBuckets(hashtable);
    3534                 : 
    3535 CBC          10 :         return false;
    3536                 :     }
    3537 ECB             : 
    3538 GIC         855 :     if (pstate->growth != PHJ_GROWTH_DISABLED &&
    3539 CBC         740 :         batch->at_least_one_chunk &&
    3540             278 :         (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
    3541 GIC         278 :          > pstate->space_allowed))
    3542                 :     {
    3543                 :         /*
    3544 ECB             :          * We have determined that this batch would exceed the space budget if
    3545                 :          * loaded into memory.  Command all participants to help repartition.
    3546                 :          */
    3547 GIC           9 :         batch->shared->space_exhausted = true;
    3548               9 :         pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
    3549               9 :         LWLockRelease(&pstate->lock);
    3550                 : 
    3551 CBC           9 :         return false;
    3552                 :     }
    3553                 : 
    3554 GIC         846 :     batch->at_least_one_chunk = true;
    3555             846 :     batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
    3556 CBC         846 :     batch->preallocated = want;
    3557             846 :     LWLockRelease(&pstate->lock);
    3558 ECB             : 
    3559 GIC         846 :     return true;
    3560 ECB             : }
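
/*
 * Editor's note: a worked example of the running estimate, with an
 * assumed ~32 kB chunk payload (the real numbers come from
 * HASH_CHUNK_SIZE and HASH_CHUNK_HEADER_SIZE):
 *
 *   first 64-byte tuple bound for batch 3 in this backend:
 *       want           = Max(64, chunk payload)  = chunk payload
 *       estimated_size += want + header           (one whole chunk)
 *       preallocated    = want
 *
 *   the next ~500 such tuples are charged against 'preallocated'
 *   without retaking pstate->lock; only once it is exhausted does the
 *   backend return here for another chunk-sized reservation.
 */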
    3561                 : 
    3562                 : /*
    3563                 :  * Calculate the limit on how much memory can be used by Hash and similar
    3564                 :  * plan types.  This is work_mem times hash_mem_multiplier, and is
    3565                 :  * expressed in bytes.
    3566                 :  *
    3567                 :  * Exported for use by the planner, as well as other hash-like executor
    3568                 :  * nodes.  This is a rather random place for this, but there is no better
    3569                 :  * place.
    3570                 :  */
    3571                 : size_t
    3572 CBC      469967 : get_hash_memory_limit(void)
    3573                 : {
    3574 ECB             :     double      mem_limit;
    3575                 : 
    3576                 :     /* Do initial calculation in double arithmetic */
    3577 GIC      469967 :     mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
    3578                 : 
    3579                 :     /* Clamp in case it doesn't fit in size_t */
    3580          469967 :     mem_limit = Min(mem_limit, (double) SIZE_MAX);
    3581 ECB             : 
    3582 GIC      469967 :     return (size_t) mem_limit;
    3583                 : }
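
/*
 * Editor's note: a worked example with stock settings: work_mem = 4096
 * (kB) and hash_mem_multiplier = 2.0 give
 *
 *     mem_limit = 4096.0 * 2.0 * 1024.0 = 8388608 bytes (8 MB),
 *
 * far below SIZE_MAX, so the clamp is a no-op on any modern platform.
 */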
        

Generated by: LCOV version v1.16-55-g56c0a2a