Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * nodeHash.c
4 : : * Routines to hash relations for hashjoin
5 : : *
6 : : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/executor/nodeHash.c
12 : : *
13 : : * See note on parallelism in nodeHashjoin.c.
14 : : *
15 : : *-------------------------------------------------------------------------
16 : : */
17 : : /*
18 : : * INTERFACE ROUTINES
19 : : * MultiExecHash - generate an in-memory hash table of the relation
20 : : * ExecInitHash - initialize node and subnodes
21 : : * ExecEndHash - shutdown node and subnodes
22 : : */
23 : :
24 : : #include "postgres.h"
25 : :
26 : : #include <math.h>
27 : : #include <limits.h>
28 : :
29 : : #include "access/htup_details.h"
30 : : #include "access/parallel.h"
31 : : #include "catalog/pg_statistic.h"
32 : : #include "commands/tablespace.h"
33 : : #include "executor/executor.h"
34 : : #include "executor/hashjoin.h"
35 : : #include "executor/nodeHash.h"
36 : : #include "executor/nodeHashjoin.h"
37 : : #include "miscadmin.h"
38 : : #include "port/atomics.h"
39 : : #include "port/pg_bitutils.h"
40 : : #include "utils/dynahash.h"
41 : : #include "utils/lsyscache.h"
42 : : #include "utils/memutils.h"
43 : : #include "utils/syscache.h"
44 : : #include "utils/wait_event.h"
45 : :
46 : : static void ExecHashIncreaseNumBatches(HashJoinTable hashtable);
47 : : static void ExecHashIncreaseNumBuckets(HashJoinTable hashtable);
48 : : static void ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable);
49 : : static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable);
50 : : static void ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node,
51 : : int mcvsToUse);
52 : : static void ExecHashSkewTableInsert(HashJoinTable hashtable,
53 : : TupleTableSlot *slot,
54 : : uint32 hashvalue,
55 : : int bucketNumber);
56 : : static void ExecHashRemoveNextSkewBucket(HashJoinTable hashtable);
57 : :
58 : : static void *dense_alloc(HashJoinTable hashtable, Size size);
59 : : static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable,
60 : : size_t size,
61 : : dsa_pointer *shared);
62 : : static void MultiExecPrivateHash(HashState *node);
63 : : static void MultiExecParallelHash(HashState *node);
64 : : static inline HashJoinTuple ExecParallelHashFirstTuple(HashJoinTable hashtable,
65 : : int bucketno);
66 : : static inline HashJoinTuple ExecParallelHashNextTuple(HashJoinTable hashtable,
67 : : HashJoinTuple tuple);
68 : : static inline void ExecParallelHashPushTuple(dsa_pointer_atomic *head,
69 : : HashJoinTuple tuple,
70 : : dsa_pointer tuple_shared);
71 : : static void ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch);
72 : : static void ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable);
73 : : static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable);
74 : : static void ExecParallelHashRepartitionRest(HashJoinTable hashtable);
75 : : static HashMemoryChunk ExecParallelHashPopChunkQueue(HashJoinTable hashtable,
76 : : dsa_pointer *shared);
77 : : static bool ExecParallelHashTuplePrealloc(HashJoinTable hashtable,
78 : : int batchno,
79 : : size_t size);
80 : : static void ExecParallelHashMergeCounters(HashJoinTable hashtable);
81 : : static void ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable);
82 : :
83 : :
84 : : /* ----------------------------------------------------------------
85 : : * ExecHash
86 : : *
87 : : * stub for pro forma compliance
88 : : * ----------------------------------------------------------------
89 : : */
90 : : static TupleTableSlot *
2463 andres@anarazel.de 91 :UBC 0 : ExecHash(PlanState *pstate)
92 : : {
6776 tgl@sss.pgh.pa.us 93 [ # # ]: 0 : elog(ERROR, "Hash node does not support ExecProcNode call convention");
94 : : return NULL;
95 : : }
96 : :
97 : : /* ----------------------------------------------------------------
98 : : * MultiExecHash
99 : : *
100 : : * build hash table for hashjoin, doing partitioning if more
101 : : * than one batch is required.
102 : : * ----------------------------------------------------------------
103 : : */
104 : : Node *
6938 tgl@sss.pgh.pa.us 105 :CBC 10403 : MultiExecHash(HashState *node)
106 : : {
107 : : /* must provide our own instrumentation support */
2307 andres@anarazel.de 108 [ + + ]: 10403 : if (node->ps.instrument)
109 : 165 : InstrStartNode(node->ps.instrument);
110 : :
111 [ + + ]: 10403 : if (node->parallel_state != NULL)
112 : 198 : MultiExecParallelHash(node);
113 : : else
114 : 10205 : MultiExecPrivateHash(node);
115 : :
116 : : /* must provide our own instrumentation support */
117 [ + + ]: 10401 : if (node->ps.instrument)
118 : 165 : InstrStopNode(node->ps.instrument, node->hashtable->partialTuples);
119 : :
120 : : /*
121 : : * We do not return the hash table directly because it's not a subtype of
122 : : * Node, and so would violate the MultiExecProcNode API. Instead, our
123 : : * parent Hashjoin node is expected to know how to fish it out of our node
124 : : * state. Ugly but not really worth cleaning up, since Hashjoin knows
125 : : * quite a bit more about Hash besides that.
126 : : */
127 : 10401 : return NULL;
128 : : }
129 : :
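To illustrate the hand-off described in the comment above, here is a simplified sketch of how a parent Hash Join can retrieve the table from the Hash node's state (illustrative only, not the exact nodeHashjoin.c code; hjstate stands for the parent's HashJoinState):

    HashState  *hashNode = (HashState *) innerPlanState(hjstate);
    HashJoinTable hashtable;

    /* MultiExecProcNode() returns NULL for Hash nodes ... */
    (void) MultiExecProcNode((PlanState *) hashNode);

    /* ... so the parent reads the table straight out of the node state. */
    hashtable = hashNode->hashtable;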
130 : : /* ----------------------------------------------------------------
131 : : * MultiExecPrivateHash
132 : : *
133 : : * parallel-oblivious version, building a backend-private
134 : : * hash table and (if necessary) batch files.
135 : : * ----------------------------------------------------------------
136 : : */
137 : : static void
138 : 10205 : MultiExecPrivateHash(HashState *node)
139 : : {
140 : : PlanState *outerNode;
141 : : List *hashkeys;
142 : : HashJoinTable hashtable;
143 : : TupleTableSlot *slot;
144 : : ExprContext *econtext;
145 : : uint32 hashvalue;
146 : :
147 : : /*
148 : : * get state info from node
149 : : */
7801 tgl@sss.pgh.pa.us 150 : 10205 : outerNode = outerPlanState(node);
151 : 10205 : hashtable = node->hashtable;
152 : :
153 : : /*
154 : : * set expression context
155 : : */
7793 156 : 10205 : hashkeys = node->hashkeys;
7801 157 : 10205 : econtext = node->ps.ps_ExprContext;
158 : :
159 : : /*
160 : : * Get all tuples from the node below the Hash node and insert into the
161 : : * hash table (or temp files).
162 : : */
163 : : for (;;)
164 : : {
6776 165 : 4396404 : slot = ExecProcNode(outerNode);
166 [ + + + + ]: 4396402 : if (TupIsNull(slot))
167 : : break;
168 : : /* We have to compute the hash value */
1717 andres@anarazel.de 169 : 4386199 : econtext->ecxt_outertuple = slot;
4854 tgl@sss.pgh.pa.us 170 [ + + ]: 4386199 : if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
171 : 4386199 : false, hashtable->keepNulls,
172 : : &hashvalue))
173 : : {
174 : : int bucketNumber;
175 : :
5503 176 : 4386190 : bucketNumber = ExecHashGetSkewBucket(hashtable, hashvalue);
177 [ + + ]: 4386190 : if (bucketNumber != INVALID_SKEW_BUCKET_NO)
178 : : {
179 : : /* It's a skew tuple, so put it into that hash table */
180 : 294 : ExecHashSkewTableInsert(hashtable, slot, hashvalue,
181 : : bucketNumber);
3471 kgrittn@postgresql.o 182 : 294 : hashtable->skewTuples += 1;
183 : : }
184 : : else
185 : : {
186 : : /* Not subject to skew optimization, so insert normally */
5503 tgl@sss.pgh.pa.us 187 : 4385896 : ExecHashTableInsert(hashtable, slot, hashvalue);
188 : : }
6286 189 : 4386190 : hashtable->totalTuples += 1;
190 : : }
191 : : }
192 : :
193 : : /* resize the hash table if needed (NTUP_PER_BUCKET exceeded) */
3471 kgrittn@postgresql.o 194 [ + + ]: 10203 : if (hashtable->nbuckets != hashtable->nbuckets_optimal)
195 : 57 : ExecHashIncreaseNumBuckets(hashtable);
196 : :
197 : : /* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
198 : 10203 : hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
199 [ + + ]: 10203 : if (hashtable->spaceUsed > hashtable->spacePeak)
200 : 10179 : hashtable->spacePeak = hashtable->spaceUsed;
201 : :
2307 andres@anarazel.de 202 : 10203 : hashtable->partialTuples = hashtable->totalTuples;
203 : 10203 : }
204 : :
205 : : /* ----------------------------------------------------------------
206 : : * MultiExecParallelHash
207 : : *
208 : : * parallel-aware version, building a shared hash table and
209 : : * (if necessary) batch files using the combined effort of
210 : : * a set of co-operating backends.
211 : : * ----------------------------------------------------------------
212 : : */
213 : : static void
214 : 198 : MultiExecParallelHash(HashState *node)
215 : : {
216 : : ParallelHashJoinState *pstate;
217 : : PlanState *outerNode;
218 : : List *hashkeys;
219 : : HashJoinTable hashtable;
220 : : TupleTableSlot *slot;
221 : : ExprContext *econtext;
222 : : uint32 hashvalue;
223 : : Barrier *build_barrier;
224 : : int i;
225 : :
226 : : /*
227 : : * get state info from node
228 : : */
229 : 198 : outerNode = outerPlanState(node);
230 : 198 : hashtable = node->hashtable;
231 : :
232 : : /*
233 : : * set expression context
234 : : */
235 : 198 : hashkeys = node->hashkeys;
236 : 198 : econtext = node->ps.ps_ExprContext;
237 : :
238 : : /*
239 : : * Synchronize the parallel hash table build. At this stage we know that
240 : : * the shared hash table has been or is being set up by
241 : : * ExecHashTableCreate(), but we don't know if our peers have returned
242 : : * from there or are here in MultiExecParallelHash(), and if so how far
243 : : * through they are. To find out, we check the build_barrier phase then
244 : : * and jump to the right step in the build algorithm.
245 : : */
246 : 198 : pstate = hashtable->parallel_state;
247 : 198 : build_barrier = &pstate->build_barrier;
388 tmunro@postgresql.or 248 [ - + ]: 198 : Assert(BarrierPhase(build_barrier) >= PHJ_BUILD_ALLOCATE);
2307 andres@anarazel.de 249 [ + + + ]: 198 : switch (BarrierPhase(build_barrier))
250 : : {
388 tmunro@postgresql.or 251 : 93 : case PHJ_BUILD_ALLOCATE:
252 : :
253 : : /*
254 : : * Either I just allocated the initial hash table in
255 : : * ExecHashTableCreate(), or someone else is doing that. Either
256 : : * way, wait for everyone to arrive here so we can proceed.
257 : : */
1429 tgl@sss.pgh.pa.us 258 : 93 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
259 : : /* Fall through. */
260 : :
388 tmunro@postgresql.or 261 : 177 : case PHJ_BUILD_HASH_INNER:
262 : :
263 : : /*
264 : : * It's time to begin hashing, or if we just arrived here then
265 : : * hashing is already underway, so join in that effort. While
266 : : * hashing we have to be prepared to help increase the number of
267 : : * batches or buckets at any time, and if we arrived here when
268 : : * that was already underway we'll have to help complete that work
269 : : * immediately so that it's safe to access batches and buckets
270 : : * below.
271 : : */
2307 andres@anarazel.de 272 [ + + ]: 177 : if (PHJ_GROW_BATCHES_PHASE(BarrierAttach(&pstate->grow_batches_barrier)) !=
273 : : PHJ_GROW_BATCHES_ELECT)
274 : 4 : ExecParallelHashIncreaseNumBatches(hashtable);
275 [ + + ]: 177 : if (PHJ_GROW_BUCKETS_PHASE(BarrierAttach(&pstate->grow_buckets_barrier)) !=
276 : : PHJ_GROW_BUCKETS_ELECT)
277 : 1 : ExecParallelHashIncreaseNumBuckets(hashtable);
278 : 177 : ExecParallelHashEnsureBatchAccessors(hashtable);
279 : 177 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
280 : : for (;;)
281 : : {
282 : 1080240 : slot = ExecProcNode(outerNode);
283 [ + + + + ]: 1080240 : if (TupIsNull(slot))
284 : : break;
1717 285 : 1080063 : econtext->ecxt_outertuple = slot;
2307 286 [ + - ]: 1080063 : if (ExecHashGetHashValue(hashtable, econtext, hashkeys,
287 : 1080063 : false, hashtable->keepNulls,
288 : : &hashvalue))
289 : 1080063 : ExecParallelHashTableInsert(hashtable, slot, hashvalue);
290 : 1080063 : hashtable->partialTuples++;
291 : : }
292 : :
293 : : /*
294 : : * Make sure that any tuples we wrote to disk are visible to
295 : : * others before anyone tries to load them.
296 : : */
297 [ + + ]: 1053 : for (i = 0; i < hashtable->nbatch; ++i)
298 : 876 : sts_end_write(hashtable->batches[i].inner_tuples);
299 : :
300 : : /*
301 : : * Update shared counters. We need an accurate total tuple count
302 : : * to control the empty table optimization.
303 : : */
304 : 177 : ExecParallelHashMergeCounters(hashtable);
305 : :
2299 306 : 177 : BarrierDetach(&pstate->grow_buckets_barrier);
307 : 177 : BarrierDetach(&pstate->grow_batches_barrier);
308 : :
309 : : /*
310 : : * Wait for everyone to finish building and flushing files and
311 : : * counters.
312 : : */
2307 313 [ + + ]: 177 : if (BarrierArriveAndWait(build_barrier,
314 : : WAIT_EVENT_HASH_BUILD_HASH_INNER))
315 : : {
316 : : /*
317 : : * Elect one backend to disable any further growth. Batches
318 : : * are now fixed. While building them we made sure they'd fit
319 : : * in our memory budget when we load them back in later (or we
320 : : * tried to do that and gave up because we detected extreme
321 : : * skew).
322 : : */
323 : 84 : pstate->growth = PHJ_GROWTH_DISABLED;
324 : : }
325 : : }
326 : :
327 : : /*
328 : : * We're not yet attached to a batch. We all agree on the dimensions and
329 : : * number of inner tuples (for the empty table optimization).
330 : : */
331 : 198 : hashtable->curbatch = -1;
332 : 198 : hashtable->nbuckets = pstate->nbuckets;
333 : 198 : hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
334 : 198 : hashtable->totalTuples = pstate->total_tuples;
335 : :
336 : : /*
337 : : * Unless we're completely done and the batch state has been freed, make
338 : : * sure we have accessors.
339 : : */
388 tmunro@postgresql.or 340 [ + - ]: 198 : if (BarrierPhase(build_barrier) < PHJ_BUILD_FREE)
390 341 : 198 : ExecParallelHashEnsureBatchAccessors(hashtable);
342 : :
343 : : /*
344 : : * The next synchronization point is in ExecHashJoin's HJ_BUILD_HASHTABLE
345 : : * case, which will bring the build phase to PHJ_BUILD_RUN (if it isn't
346 : : * there already).
347 : : */
388 348 [ + + - + - - ]: 198 : Assert(BarrierPhase(build_barrier) == PHJ_BUILD_HASH_OUTER ||
349 : : BarrierPhase(build_barrier) == PHJ_BUILD_RUN ||
350 : : BarrierPhase(build_barrier) == PHJ_BUILD_FREE);
10141 scrappy@hub.org 351 : 198 : }
352 : :
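The build_barrier handling above relies on two idioms that recur throughout this file: BarrierPhase() tells a late arrival how far the group has progressed, and BarrierArriveAndWait() returns true in exactly one participant, which doubles as a leader election. A minimal sketch of the pattern distilled from the function above (not a complete build loop):

    switch (BarrierPhase(build_barrier))
    {
        case PHJ_BUILD_ALLOCATE:
            /* wait here for the elected backend to finish allocating */
            BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATE);
            /* Fall through. */

        case PHJ_BUILD_HASH_INNER:
            /* ... everyone helps hash the inner relation ... */
            if (BarrierArriveAndWait(build_barrier,
                                     WAIT_EVENT_HASH_BUILD_HASH_INNER))
            {
                /* exactly one backend is elected to run the serial step */
            }
            break;
    }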
353 : : /* ----------------------------------------------------------------
354 : : * ExecInitHash
355 : : *
356 : : * Init routine for Hash node
357 : : * ----------------------------------------------------------------
358 : : */
359 : : HashState *
6620 tgl@sss.pgh.pa.us 360 : 14898 : ExecInitHash(Hash *node, EState *estate, int eflags)
361 : : {
362 : : HashState *hashstate;
363 : :
364 : : /* check for unsupported flags */
365 [ - + ]: 14898 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
366 : :
367 : : /*
368 : : * create state structure
369 : : */
9716 bruce@momjian.us 370 : 14898 : hashstate = makeNode(HashState);
7801 tgl@sss.pgh.pa.us 371 : 14898 : hashstate->ps.plan = (Plan *) node;
372 : 14898 : hashstate->ps.state = estate;
2463 andres@anarazel.de 373 : 14898 : hashstate->ps.ExecProcNode = ExecHash;
9098 tgl@sss.pgh.pa.us 374 : 14898 : hashstate->hashtable = NULL;
7446 375 : 14898 : hashstate->hashkeys = NIL; /* will be set by parent HashJoin */
376 : :
377 : : /*
378 : : * Miscellaneous initialization
379 : : *
380 : : * create expression context for node
381 : : */
7801 382 : 14898 : ExecAssignExprContext(estate, &hashstate->ps);
383 : :
384 : : /*
385 : : * initialize child nodes
386 : : */
6620 387 : 14898 : outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate, eflags);
388 : :
389 : : /*
390 : : * initialize our result slot and type. No need to build projection
391 : : * because this node doesn't do projections.
392 : : */
1977 andres@anarazel.de 393 : 14898 : ExecInitResultTupleSlotTL(&hashstate->ps, &TTSOpsMinimalTuple);
7801 tgl@sss.pgh.pa.us 394 : 14898 : hashstate->ps.ps_ProjInfo = NULL;
395 : :
396 : : /*
397 : : * initialize child expressions
398 : : */
1717 andres@anarazel.de 399 [ - + ]: 14898 : Assert(node->plan.qual == NIL);
400 : 14898 : hashstate->hashkeys =
401 : 14898 : ExecInitExprList(node->hashkeys, (PlanState *) hashstate);
402 : :
7801 tgl@sss.pgh.pa.us 403 : 14898 : return hashstate;
404 : : }
405 : :
406 : : /* ---------------------------------------------------------------
407 : : * ExecEndHash
408 : : *
409 : : * clean up routine for Hash node
410 : : * ----------------------------------------------------------------
411 : : */
412 : : void
413 : 14843 : ExecEndHash(HashState *node)
414 : : {
415 : : PlanState *outerPlan;
416 : :
417 : : /*
418 : : * shut down the subplan
419 : : */
420 : 14843 : outerPlan = outerPlanState(node);
421 : 14843 : ExecEndNode(outerPlan);
9716 bruce@momjian.us 422 : 14843 : }
423 : :
424 : :
425 : : /* ----------------------------------------------------------------
426 : : * ExecHashTableCreate
427 : : *
428 : : * create an empty hashtable data structure for hashjoin.
429 : : * ----------------------------------------------------------------
430 : : */
431 : : HashJoinTable
1850 peter@eisentraut.org 432 : 10403 : ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations, bool keepNulls)
433 : : {
434 : : Hash *node;
435 : : HashJoinTable hashtable;
436 : : Plan *outerNode;
437 : : size_t space_allowed;
438 : : int nbuckets;
439 : : int nbatch;
440 : : double rows;
441 : : int num_skew_mcvs;
442 : : int log2_nbuckets;
443 : : int nkeys;
444 : : int i;
445 : : ListCell *ho;
446 : : ListCell *hc;
447 : : MemoryContext oldcxt;
448 : :
449 : : /*
450 : : * Get information about the size of the relation to be hashed (it's the
451 : : * "outer" subtree of this node, but the inner relation of the hashjoin).
452 : : * Compute the appropriate size of the hash table.
453 : : */
2307 andres@anarazel.de 454 : 10403 : node = (Hash *) state->ps.plan;
9716 bruce@momjian.us 455 : 10403 : outerNode = outerPlan(node);
456 : :
457 : : /*
458 : : * If this is a shared hash table with a partial plan, then we can't use
459 : : * outerNode->plan_rows to estimate its size. We need an estimate of the
460 : : * total number of rows across all copies of the partial plan.
461 : : */
2307 andres@anarazel.de 462 [ + + ]: 10403 : rows = node->plan.parallel_aware ? node->rows_total : outerNode->plan_rows;
463 : :
464 : 10205 : ExecChooseHashTableSize(rows, outerNode->plan_width,
5503 tgl@sss.pgh.pa.us 465 : 10403 : OidIsValid(node->skewTable),
2307 andres@anarazel.de 466 : 10403 : state->parallel_state != NULL,
467 [ + + ]: 10403 : state->parallel_state != NULL ?
468 : 198 : state->parallel_state->nparticipants - 1 : 0,
469 : : &space_allowed,
470 : : &nbuckets, &nbatch, &num_skew_mcvs);
471 : :
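A worked example of the rows estimate used above (numbers invented for illustration): with a leader plus two workers running a partial inner plan, plan_rows might be about 1000 (the per-participant estimate) while rows_total is about 3000 (the planner's estimate across all copies). Sizing the shared table from plan_rows alone would undersize it roughly threefold, which is why the parallel-aware case reads node->rows_total instead.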
472 : : /* nbuckets must be a power of 2 */
6162 tgl@sss.pgh.pa.us 473 : 10403 : log2_nbuckets = my_log2(nbuckets);
474 [ - + ]: 10403 : Assert(nbuckets == (1 << log2_nbuckets));
475 : :
476 : : /*
477 : : * Initialize the hash table control block.
478 : : *
479 : : * The hashtable control block is just palloc'd from the executor's
480 : : * per-query memory context. Everything else should be kept inside the
481 : : * subsidiary hashCxt, batchCxt or spillCxt.
482 : : */
580 peter@eisentraut.org 483 : 10403 : hashtable = palloc_object(HashJoinTableData);
9716 bruce@momjian.us 484 : 10403 : hashtable->nbuckets = nbuckets;
3471 kgrittn@postgresql.o 485 : 10403 : hashtable->nbuckets_original = nbuckets;
486 : 10403 : hashtable->nbuckets_optimal = nbuckets;
6162 tgl@sss.pgh.pa.us 487 : 10403 : hashtable->log2_nbuckets = log2_nbuckets;
3471 kgrittn@postgresql.o 488 : 10403 : hashtable->log2_nbuckets_optimal = log2_nbuckets;
2307 andres@anarazel.de 489 : 10403 : hashtable->buckets.unshared = NULL;
4854 tgl@sss.pgh.pa.us 490 : 10403 : hashtable->keepNulls = keepNulls;
5503 491 : 10403 : hashtable->skewEnabled = false;
492 : 10403 : hashtable->skewBucket = NULL;
493 : 10403 : hashtable->skewBucketLen = 0;
494 : 10403 : hashtable->nSkewBuckets = 0;
495 : 10403 : hashtable->skewBucketNums = NULL;
9716 bruce@momjian.us 496 : 10403 : hashtable->nbatch = nbatch;
497 : 10403 : hashtable->curbatch = 0;
6979 tgl@sss.pgh.pa.us 498 : 10403 : hashtable->nbatch_original = nbatch;
499 : 10403 : hashtable->nbatch_outstart = nbatch;
500 : 10403 : hashtable->growEnabled = true;
6938 501 : 10403 : hashtable->totalTuples = 0;
2307 andres@anarazel.de 502 : 10403 : hashtable->partialTuples = 0;
3471 kgrittn@postgresql.o 503 : 10403 : hashtable->skewTuples = 0;
9098 tgl@sss.pgh.pa.us 504 : 10403 : hashtable->innerBatchFile = NULL;
505 : 10403 : hashtable->outerBatchFile = NULL;
6979 506 : 10403 : hashtable->spaceUsed = 0;
5186 rhaas@postgresql.org 507 : 10403 : hashtable->spacePeak = 0;
2307 andres@anarazel.de 508 : 10403 : hashtable->spaceAllowed = space_allowed;
5503 tgl@sss.pgh.pa.us 509 : 10403 : hashtable->spaceUsedSkew = 0;
510 : 10403 : hashtable->spaceAllowedSkew =
1355 pg@bowt.ie 511 : 10403 : hashtable->spaceAllowed * SKEW_HASH_MEM_PERCENT / 100;
3504 heikki.linnakangas@i 512 : 10403 : hashtable->chunks = NULL;
2307 andres@anarazel.de 513 : 10403 : hashtable->current_chunk = NULL;
514 : 10403 : hashtable->parallel_state = state->parallel_state;
515 : 10403 : hashtable->area = state->ps.state->es_query_dsa;
516 : 10403 : hashtable->batches = NULL;
517 : :
518 : : #ifdef HJDEBUG
519 : : printf("Hashjoin %p: initial nbatch = %d, nbuckets = %d\n",
520 : : hashtable, nbatch, nbuckets);
521 : : #endif
522 : :
523 : : /*
524 : : * Create temporary memory contexts in which to keep the hashtable working
525 : : * storage. See notes in executor/hashjoin.h.
526 : : */
2221 tgl@sss.pgh.pa.us 527 : 10403 : hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
528 : : "HashTableContext",
529 : : ALLOCSET_DEFAULT_SIZES);
530 : :
531 : 10403 : hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
532 : : "HashBatchContext",
533 : : ALLOCSET_DEFAULT_SIZES);
534 : :
331 tomas.vondra@postgre 535 : 10403 : hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
536 : : "HashSpillContext",
537 : : ALLOCSET_DEFAULT_SIZES);
538 : :
539 : : /* Allocate data that will live for the life of the hashjoin */
540 : :
2221 tgl@sss.pgh.pa.us 541 : 10403 : oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
542 : :
543 : : /*
544 : : * Get info about the hash functions to be used for each hash key. Also
545 : : * remember whether the join operators are strict.
546 : : */
7259 neilc@samurai.com 547 : 10403 : nkeys = list_length(hashOperators);
580 peter@eisentraut.org 548 : 10403 : hashtable->outer_hashfunctions = palloc_array(FmgrInfo, nkeys);
549 : 10403 : hashtable->inner_hashfunctions = palloc_array(FmgrInfo, nkeys);
550 : 10403 : hashtable->hashStrict = palloc_array(bool, nkeys);
551 : 10403 : hashtable->collations = palloc_array(Oid, nkeys);
7806 tgl@sss.pgh.pa.us 552 : 10403 : i = 0;
1850 peter@eisentraut.org 553 [ + - + + + - + + + + + - + + ]: 21472 : forboth(ho, hashOperators, hc, hashCollations)
554 : : {
6286 tgl@sss.pgh.pa.us 555 : 11069 : Oid hashop = lfirst_oid(ho);
556 : : Oid left_hashfn;
557 : : Oid right_hashfn;
558 : :
6284 559 [ - + ]: 11069 : if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
7573 tgl@sss.pgh.pa.us 560 [ # # ]:UBC 0 : elog(ERROR, "could not find hash function for hash operator %u",
561 : : hashop);
6284 tgl@sss.pgh.pa.us 562 :CBC 11069 : fmgr_info(left_hashfn, &hashtable->outer_hashfunctions[i]);
563 : 11069 : fmgr_info(right_hashfn, &hashtable->inner_hashfunctions[i]);
6286 564 : 11069 : hashtable->hashStrict[i] = op_strict(hashop);
1850 peter@eisentraut.org 565 : 11069 : hashtable->collations[i] = lfirst_oid(hc);
7806 tgl@sss.pgh.pa.us 566 : 11069 : i++;
567 : : }
568 : :
2307 andres@anarazel.de 569 [ + + + + ]: 10403 : if (nbatch > 1 && hashtable->parallel_state == NULL)
570 : : {
571 : : MemoryContext oldctx;
572 : :
573 : : /*
574 : : * allocate and initialize the file arrays in hashCxt (not needed for
575 : : * parallel case which uses shared tuplestores instead of raw files)
576 : : */
331 tomas.vondra@postgre 577 : 63 : oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
578 : :
580 peter@eisentraut.org 579 : 63 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
580 : 63 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
581 : :
331 tomas.vondra@postgre 582 : 63 : MemoryContextSwitchTo(oldctx);
583 : :
584 : : /* The files will not be opened until needed... */
585 : : /* ... but make sure we have temp tablespaces established for them */
6156 tgl@sss.pgh.pa.us 586 : 63 : PrepareTempTablespaces();
587 : : }
588 : :
2307 andres@anarazel.de 589 : 10403 : MemoryContextSwitchTo(oldcxt);
590 : :
591 [ + + ]: 10403 : if (hashtable->parallel_state)
592 : : {
593 : 198 : ParallelHashJoinState *pstate = hashtable->parallel_state;
594 : : Barrier *build_barrier;
595 : :
596 : : /*
597 : : * Attach to the build barrier. The corresponding detach operation is
598 : : * in ExecHashTableDetach. Note that we won't attach to the
599 : : * batch_barrier for batch 0 yet. We'll attach later and start it out
600 : : * in PHJ_BATCH_PROBE phase, because batch 0 is allocated up front and
601 : : * then loaded while hashing (the standard hybrid hash join
602 : : * algorithm), and we'll coordinate that using build_barrier.
603 : : */
604 : 198 : build_barrier = &pstate->build_barrier;
605 : 198 : BarrierAttach(build_barrier);
606 : :
607 : : /*
608 : : * So far we have no idea whether there are any other participants,
609 : : * and if so, what phase they are working on. The only thing we care
610 : : * about at this point is whether someone has already created the
611 : : * SharedHashJoinBatch objects and the hash table for batch 0. One
612 : : * backend will be elected to do that now if necessary.
613 : : */
388 tmunro@postgresql.or 614 [ + + + - ]: 282 : if (BarrierPhase(build_barrier) == PHJ_BUILD_ELECT &&
1429 tgl@sss.pgh.pa.us 615 : 84 : BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ELECT))
616 : : {
2307 andres@anarazel.de 617 : 84 : pstate->nbatch = nbatch;
618 : 84 : pstate->space_allowed = space_allowed;
619 : 84 : pstate->growth = PHJ_GROWTH_OK;
620 : :
621 : : /* Set up the shared state for coordinating batches. */
622 : 84 : ExecParallelHashJoinSetUpBatches(hashtable, nbatch);
623 : :
624 : : /*
625 : : * Allocate batch 0's hash table up front so we can load it
626 : : * directly while hashing.
627 : : */
628 : 84 : pstate->nbuckets = nbuckets;
629 : 84 : ExecParallelHashTableAlloc(hashtable, 0);
630 : : }
631 : :
632 : : /*
633 : : * The next Parallel Hash synchronization point is in
634 : : * MultiExecParallelHash(), which will progress it all the way to
635 : : * PHJ_BUILD_RUN. The caller must not return control from this
636 : : * executor node between now and then.
637 : : */
638 : : }
639 : : else
640 : : {
641 : : /*
642 : : * Prepare context for the first-scan space allocations; allocate the
643 : : * hashbucket array therein, and set each bucket "empty".
644 : : */
645 : 10205 : MemoryContextSwitchTo(hashtable->batchCxt);
646 : :
580 peter@eisentraut.org 647 : 10205 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
648 : :
649 : : /*
650 : : * Set up for skew optimization, if possible and there's a need for
651 : : * more than one batch. (In a one-batch join, there's no point in
652 : : * it.)
653 : : */
2307 andres@anarazel.de 654 [ + + ]: 10205 : if (nbatch > 1)
655 : 63 : ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs);
656 : :
657 : 10205 : MemoryContextSwitchTo(oldcxt);
658 : : }
659 : :
9357 bruce@momjian.us 660 : 10403 : return hashtable;
661 : : }
662 : :
663 : :
664 : : /*
665 : : * Compute appropriate size for hashtable given the estimated size of the
666 : : * relation to be hashed (number of rows and average row width).
667 : : *
668 : : * This is exported so that the planner's costsize.c can use it.
669 : : */
670 : :
671 : : /* Target bucket loading (tuples per bucket) */
672 : : #define NTUP_PER_BUCKET 1
673 : :
674 : : void
5503 tgl@sss.pgh.pa.us 675 : 280577 : ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
676 : : bool try_combined_hash_mem,
677 : : int parallel_workers,
678 : : size_t *space_allowed,
679 : : int *numbuckets,
680 : : int *numbatches,
681 : : int *num_skew_mcvs)
682 : : {
683 : : int tupsize;
684 : : double inner_rel_bytes;
685 : : size_t hash_table_bytes;
686 : : size_t bucket_bytes;
687 : : size_t max_pointers;
3502 rhaas@postgresql.org 688 : 280577 : int nbatch = 1;
689 : : int nbuckets;
690 : : double dbuckets;
691 : :
692 : : /* Force a plausible relation size if no info */
8343 tgl@sss.pgh.pa.us 693 [ + + ]: 280577 : if (ntuples <= 0.0)
694 : 75 : ntuples = 1000.0;
695 : :
696 : : /*
697 : : * Estimate tupsize based on footprint of tuple in hashtable... note this
698 : : * does not allow for any palloc overhead. The manipulations of spaceUsed
699 : : * don't count palloc overhead either.
700 : : */
6501 701 : 280577 : tupsize = HJTUPLE_OVERHEAD +
3340 702 : 280577 : MAXALIGN(SizeofMinimalTupleHeader) +
6979 703 : 280577 : MAXALIGN(tupwidth);
704 : 280577 : inner_rel_bytes = ntuples * tupsize;
705 : :
706 : : /*
707 : : * Compute in-memory hashtable size limit from GUCs.
708 : : */
994 709 : 280577 : hash_table_bytes = get_hash_memory_limit();
710 : :
711 : : /*
712 : : * Parallel Hash tries to use the combined hash_mem of all workers to
713 : : * avoid the need to batch. If that won't work, it falls back to hash_mem
714 : : * per worker and tries to process batches in parallel.
715 : : */
1355 pg@bowt.ie 716 [ + + ]: 280577 : if (try_combined_hash_mem)
717 : : {
718 : : /* Careful, this could overflow size_t */
719 : : double newlimit;
720 : :
994 tgl@sss.pgh.pa.us 721 : 5985 : newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
722 [ + - ]: 5985 : newlimit = Min(newlimit, (double) SIZE_MAX);
723 : 5985 : hash_table_bytes = (size_t) newlimit;
724 : : }
725 : :
2307 andres@anarazel.de 726 : 280577 : *space_allowed = hash_table_bytes;
727 : :
728 : : /*
729 : : * If skew optimization is possible, estimate the number of skew buckets
730 : : * that will fit in the memory allowed, and decrement the assumed space
731 : : * available for the main hash table accordingly.
732 : : *
733 : : * We make the optimistic assumption that each skew bucket will contain
734 : : * one inner-relation tuple. If that turns out to be low, we will recover
735 : : * at runtime by reducing the number of skew buckets.
736 : : *
737 : : * hashtable->skewBucket will have up to 8 times as many HashSkewBucket
738 : : * pointers as the number of MCVs we allow, since ExecHashBuildSkewHash
739 : : * will round up to the next power of 2 and then multiply by 4 to reduce
740 : : * collisions.
741 : : */
5503 tgl@sss.pgh.pa.us 742 [ + + ]: 280577 : if (useskew)
743 : : {
744 : : size_t bytes_per_mcv;
745 : : size_t skew_mcvs;
746 : :
747 : : /*----------
748 : : * Compute number of MCVs we could hold in hash_table_bytes
749 : : *
750 : : * Divisor is:
751 : : * size of a hash tuple +
752 : : * worst-case size of skewBucket[] per MCV +
753 : : * size of skewBucketNums[] entry +
754 : : * size of skew bucket struct itself
755 : : *----------
756 : : */
994 757 : 278386 : bytes_per_mcv = tupsize +
758 : : (8 * sizeof(HashSkewBucket *)) +
759 : 278386 : sizeof(int) +
760 : : SKEW_BUCKET_OVERHEAD;
761 : 278386 : skew_mcvs = hash_table_bytes / bytes_per_mcv;
762 : :
763 : : /*
764 : : * Now scale by SKEW_HASH_MEM_PERCENT (we do it in this order so as
765 : : * not to worry about size_t overflow in the multiplication)
766 : : */
767 : 278386 : skew_mcvs = (skew_mcvs * SKEW_HASH_MEM_PERCENT) / 100;
768 : :
769 : : /* Now clamp to integer range */
770 : 278386 : skew_mcvs = Min(skew_mcvs, INT_MAX);
771 : :
772 : 278386 : *num_skew_mcvs = (int) skew_mcvs;
773 : :
774 : : /* Reduce hash_table_bytes by the amount needed for the skew table */
775 [ + - ]: 278386 : if (skew_mcvs > 0)
776 : 278386 : hash_table_bytes -= skew_mcvs * bytes_per_mcv;
777 : : }
778 : : else
5503 779 : 2191 : *num_skew_mcvs = 0;
780 : :
781 : : /*
782 : : * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when
783 : : * memory is filled, assuming a single batch; but limit the value so that
784 : : * the pointer arrays we'll try to allocate do not exceed hash_table_bytes
785 : : * nor MaxAllocSize.
786 : : *
787 : : * Note that both nbuckets and nbatch must be powers of 2 to make
788 : : * ExecHashGetBucketAndBatch fast.
789 : : */
994 790 : 280577 : max_pointers = hash_table_bytes / sizeof(HashJoinTuple);
3115 791 : 280577 : max_pointers = Min(max_pointers, MaxAllocSize / sizeof(HashJoinTuple));
792 : : /* If max_pointers isn't a power of 2, must round it down to one */
994 793 : 280577 : max_pointers = pg_prevpower2_size_t(max_pointers);
794 : :
795 : : /* Also ensure we avoid integer overflow in nbatch and nbuckets */
796 : : /* (this step is redundant given the current value of MaxAllocSize) */
797 : 280577 : max_pointers = Min(max_pointers, INT_MAX / 2 + 1);
798 : :
3502 rhaas@postgresql.org 799 : 280577 : dbuckets = ceil(ntuples / NTUP_PER_BUCKET);
800 [ + + ]: 280577 : dbuckets = Min(dbuckets, max_pointers);
3115 tgl@sss.pgh.pa.us 801 : 280577 : nbuckets = (int) dbuckets;
802 : : /* don't let nbuckets be really small, though ... */
803 : 280577 : nbuckets = Max(nbuckets, 1024);
804 : : /* ... and force it to be a power of 2. */
994 805 : 280577 : nbuckets = pg_nextpower2_32(nbuckets);
806 : :
807 : : /*
808 : : * If there's not enough space to store the projected number of tuples and
809 : : * the required bucket headers, we will need multiple batches.
810 : : */
3115 811 : 280577 : bucket_bytes = sizeof(HashJoinTuple) * nbuckets;
3502 rhaas@postgresql.org 812 [ + + ]: 280577 : if (inner_rel_bytes + bucket_bytes > hash_table_bytes)
813 : : {
814 : : /* We'll need multiple batches */
815 : : size_t sbuckets;
816 : : double dbatch;
817 : : int minbatch;
818 : : size_t bucket_size;
819 : :
820 : : /*
821 : : * If Parallel Hash with combined hash_mem would still need multiple
822 : : * batches, we'll have to fall back to regular hash_mem budget.
823 : : */
1355 pg@bowt.ie 824 [ + + ]: 2519 : if (try_combined_hash_mem)
825 : : {
2307 andres@anarazel.de 826 : 123 : ExecChooseHashTableSize(ntuples, tupwidth, useskew,
827 : : false, parallel_workers,
828 : : space_allowed,
829 : : numbuckets,
830 : : numbatches,
831 : : num_skew_mcvs);
832 : 123 : return;
833 : : }
834 : :
835 : : /*
836 : : * Estimate the number of buckets we'll want to have when hash_mem is
837 : : * entirely full. Each bucket will contain a bucket pointer plus
838 : : * NTUP_PER_BUCKET tuples, whose projected size already includes
839 : : * overhead for the hash code, pointer to the next tuple, etc.
840 : : */
3502 rhaas@postgresql.org 841 : 2396 : bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
610 tgl@sss.pgh.pa.us 842 [ - + ]: 2396 : if (hash_table_bytes <= bucket_size)
610 tgl@sss.pgh.pa.us 843 :UBC 0 : sbuckets = 1; /* avoid pg_nextpower2_size_t(0) */
844 : : else
610 tgl@sss.pgh.pa.us 845 :CBC 2396 : sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
994 846 : 2396 : sbuckets = Min(sbuckets, max_pointers);
847 : 2396 : nbuckets = (int) sbuckets;
848 : 2396 : nbuckets = pg_nextpower2_32(nbuckets);
3502 rhaas@postgresql.org 849 : 2396 : bucket_bytes = nbuckets * sizeof(HashJoinTuple);
850 : :
851 : : /*
852 : : * Buckets are simple pointers to hashjoin tuples, while tupsize
853 : : * includes the pointer, hash code, and MinimalTupleData. So buckets
854 : : * should never really exceed 25% of hash_mem (even for
855 : : * NTUP_PER_BUCKET=1); except maybe for hash_mem values that are not
856 : : * 2^N bytes, where we might get more because of doubling. So let's
857 : : * look for 50% here.
858 : : */
859 [ - + ]: 2396 : Assert(bucket_bytes <= hash_table_bytes / 2);
860 : :
861 : : /* Calculate required number of batches. */
862 : 2396 : dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
5280 tgl@sss.pgh.pa.us 863 [ + - ]: 2396 : dbatch = Min(dbatch, max_pointers);
6979 864 : 2396 : minbatch = (int) dbatch;
1467 drowley@postgresql.o 865 : 2396 : nbatch = pg_nextpower2_32(Max(2, minbatch));
866 : : }
867 : :
3181 tgl@sss.pgh.pa.us 868 [ - + ]: 280454 : Assert(nbuckets > 0);
869 [ - + ]: 280454 : Assert(nbatch > 0);
870 : :
6979 871 : 280454 : *numbuckets = nbuckets;
8343 872 : 280454 : *numbatches = nbatch;
873 : : }
874 : :
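Two illustrations of the sizing logic above, both approximate and using invented numbers. First, the arithmetic: with ntuples = 1,000,000, tupwidth = 40 and hash_table_bytes = 64MB, tupsize lands near 80 bytes, so inner_rel_bytes is roughly 80MB; dbuckets = ceil(1,000,000 / NTUP_PER_BUCKET) rounds up to nbuckets = 2^20, giving bucket_bytes = 2^20 * sizeof(HashJoinTuple) = 8MB on a 64-bit build, and since 80MB + 8MB exceeds 64MB the multi-batch path picks nbatch as the next power of 2 that lets one batch fit. Second, a sketch of why nbuckets and nbatch must both be powers of 2: it lets ExecHashGetBucketAndBatch split a hash value with masks and shifts instead of division (the exact bit layout has varied across PostgreSQL versions, so treat this as an approximation, not the actual source):

    static inline void
    sketch_bucket_and_batch(uint32 hashvalue, int log2_nbuckets,
                            int nbuckets, int nbatch,
                            int *bucketno, int *batchno)
    {
        /* low bits select the bucket ... */
        *bucketno = (int) (hashvalue & (nbuckets - 1));
        /* ... and the bits just above them select the batch */
        *batchno = (int) ((hashvalue >> log2_nbuckets) & (nbatch - 1));
    }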
875 : :
876 : : /* ----------------------------------------------------------------
877 : : * ExecHashTableDestroy
878 : : *
879 : : * destroy a hash table
880 : : * ----------------------------------------------------------------
881 : : */
882 : : void
9098 883 : 10349 : ExecHashTableDestroy(HashJoinTable hashtable)
884 : : {
885 : : int i;
886 : :
887 : : /*
888 : : * Make sure all the temp files are closed. We skip batch 0, since it
889 : : * can't have any temp files (and the arrays might not even exist if
890 : : * nbatch is only 1). Parallel hash joins don't use these files.
891 : : */
2307 andres@anarazel.de 892 [ + + ]: 10349 : if (hashtable->innerBatchFile != NULL)
893 : : {
894 [ + + ]: 924 : for (i = 1; i < hashtable->nbatch; i++)
895 : : {
896 [ - + ]: 813 : if (hashtable->innerBatchFile[i])
2307 andres@anarazel.de 897 :UBC 0 : BufFileClose(hashtable->innerBatchFile[i]);
2307 andres@anarazel.de 898 [ - + ]:CBC 813 : if (hashtable->outerBatchFile[i])
2307 andres@anarazel.de 899 :UBC 0 : BufFileClose(hashtable->outerBatchFile[i]);
900 : : }
901 : : }
902 : :
903 : : /* Release working memory (batchCxt is a child, so it goes away too) */
8691 tgl@sss.pgh.pa.us 904 :CBC 10349 : MemoryContextDelete(hashtable->hashCxt);
905 : :
906 : : /* And drop the control block */
9098 907 : 10349 : pfree(hashtable);
908 : 10349 : }
909 : :
910 : : /*
911 : : * ExecHashIncreaseNumBatches
912 : : * increase the original number of batches in order to reduce
913 : : * current memory consumption
914 : : */
915 : : static void
6979 916 : 414579 : ExecHashIncreaseNumBatches(HashJoinTable hashtable)
917 : : {
918 : 414579 : int oldnbatch = hashtable->nbatch;
919 : 414579 : int curbatch = hashtable->curbatch;
920 : : int nbatch;
921 : : long ninmemory;
922 : : long nfreed;
923 : : HashMemoryChunk oldchunks;
924 : :
925 : : /* do nothing if we've decided to shut off growth */
926 [ + + ]: 414579 : if (!hashtable->growEnabled)
927 : 414480 : return;
928 : :
929 : : /* safety check to avoid overflow */
5280 930 [ - + ]: 99 : if (oldnbatch > Min(INT_MAX / 2, MaxAllocSize / (sizeof(void *) * 2)))
6979 tgl@sss.pgh.pa.us 931 :UBC 0 : return;
932 : :
6979 tgl@sss.pgh.pa.us 933 :CBC 99 : nbatch = oldnbatch * 2;
934 [ - + ]: 99 : Assert(nbatch > 1);
935 : :
936 : : #ifdef HJDEBUG
937 : : printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
938 : : hashtable, nbatch, hashtable->spaceUsed);
939 : : #endif
940 : :
941 [ + + ]: 99 : if (hashtable->innerBatchFile == NULL)
942 : : {
331 tomas.vondra@postgre 943 : 48 : MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
944 : :
945 : : /* we had no file arrays before */
580 peter@eisentraut.org 946 : 48 : hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
947 : 48 : hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
948 : :
331 tomas.vondra@postgre 949 : 48 : MemoryContextSwitchTo(oldcxt);
950 : :
951 : : /* time to establish the temp tablespaces, too */
6156 tgl@sss.pgh.pa.us 952 : 48 : PrepareTempTablespaces();
953 : : }
954 : : else
955 : : {
956 : : /* enlarge arrays and zero out added entries */
519 peter@eisentraut.org 957 : 51 : hashtable->innerBatchFile = repalloc0_array(hashtable->innerBatchFile, BufFile *, oldnbatch, nbatch);
958 : 51 : hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
959 : : }
960 : :
6979 tgl@sss.pgh.pa.us 961 : 99 : hashtable->nbatch = nbatch;
962 : :
963 : : /*
964 : : * Scan through the existing hash table entries and dump out any that are
965 : : * no longer part of the current batch.
966 : : */
967 : 99 : ninmemory = nfreed = 0;
968 : :
969 : : /* If we know we need to resize nbuckets, we can do it while rebatching. */
3471 kgrittn@postgresql.o 970 [ + + ]: 99 : if (hashtable->nbuckets_optimal != hashtable->nbuckets)
971 : : {
972 : : /* we never decrease the number of buckets */
973 [ - + ]: 48 : Assert(hashtable->nbuckets_optimal > hashtable->nbuckets);
974 : :
975 : 48 : hashtable->nbuckets = hashtable->nbuckets_optimal;
976 : 48 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
977 : :
2307 andres@anarazel.de 978 : 48 : hashtable->buckets.unshared =
580 peter@eisentraut.org 979 : 48 : repalloc_array(hashtable->buckets.unshared,
980 : : HashJoinTuple, hashtable->nbuckets);
981 : : }
982 : :
983 : : /*
984 : : * We will scan through the chunks directly, so that we can reset the
985 : : * buckets now and not have to keep track which tuples in the buckets have
986 : : * already been processed. We will free the old chunks as we go.
987 : : */
2307 andres@anarazel.de 988 : 99 : memset(hashtable->buckets.unshared, 0,
989 : 99 : sizeof(HashJoinTuple) * hashtable->nbuckets);
3504 heikki.linnakangas@i 990 : 99 : oldchunks = hashtable->chunks;
991 : 99 : hashtable->chunks = NULL;
992 : :
993 : : /* so, let's scan through the old chunks, and all tuples in each chunk */
994 [ + + ]: 495 : while (oldchunks != NULL)
995 : : {
2307 andres@anarazel.de 996 : 396 : HashMemoryChunk nextchunk = oldchunks->next.unshared;
997 : :
998 : : /* position within the buffer (up to oldchunks->used) */
3504 heikki.linnakangas@i 999 : 396 : size_t idx = 0;
1000 : :
1001 : : /* process all tuples stored in this chunk (and then free it) */
1002 [ + + ]: 270549 : while (idx < oldchunks->used)
1003 : : {
2294 tgl@sss.pgh.pa.us 1004 : 270153 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(oldchunks) + idx);
3504 heikki.linnakangas@i 1005 : 270153 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1006 : 270153 : int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
1007 : : int bucketno;
1008 : : int batchno;
1009 : :
6979 tgl@sss.pgh.pa.us 1010 : 270153 : ninmemory++;
3504 heikki.linnakangas@i 1011 : 270153 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1012 : : &bucketno, &batchno);
1013 : :
6979 tgl@sss.pgh.pa.us 1014 [ + + ]: 270153 : if (batchno == curbatch)
1015 : : {
1016 : : /* keep tuple in memory - copy it into the new chunk */
1017 : : HashJoinTuple copyTuple;
1018 : :
3248 1019 : 101433 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
3504 heikki.linnakangas@i 1020 : 101433 : memcpy(copyTuple, hashTuple, hashTupleSize);
1021 : :
1022 : : /* and add it back to the appropriate bucket */
2307 andres@anarazel.de 1023 : 101433 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1024 : 101433 : hashtable->buckets.unshared[bucketno] = copyTuple;
1025 : : }
1026 : : else
1027 : : {
1028 : : /* dump it out */
6979 tgl@sss.pgh.pa.us 1029 [ - + ]: 168720 : Assert(batchno > curbatch);
3504 heikki.linnakangas@i 1030 : 168720 : ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
1031 : : hashTuple->hashvalue,
331 tomas.vondra@postgre 1032 : 168720 : &hashtable->innerBatchFile[batchno],
1033 : : hashtable);
1034 : :
3504 heikki.linnakangas@i 1035 : 168720 : hashtable->spaceUsed -= hashTupleSize;
6979 tgl@sss.pgh.pa.us 1036 : 168720 : nfreed++;
1037 : : }
1038 : :
1039 : : /* next tuple in this chunk */
3504 heikki.linnakangas@i 1040 : 270153 : idx += MAXALIGN(hashTupleSize);
1041 : :
1042 : : /* allow this loop to be cancellable */
2615 tgl@sss.pgh.pa.us 1043 [ - + ]: 270153 : CHECK_FOR_INTERRUPTS();
1044 : : }
1045 : :
1046 : : /* we're done with this chunk - free it and proceed to the next one */
3504 heikki.linnakangas@i 1047 : 396 : pfree(oldchunks);
1048 : 396 : oldchunks = nextchunk;
1049 : : }
1050 : :
1051 : : #ifdef HJDEBUG
1052 : : printf("Hashjoin %p: freed %ld of %ld tuples, space now %zu\n",
1053 : : hashtable, nfreed, ninmemory, hashtable->spaceUsed);
1054 : : #endif
1055 : :
1056 : : /*
1057 : : * If we dumped out either all or none of the tuples in the table, disable
1058 : : * further expansion of nbatch. This situation implies that we have
1059 : : * enough tuples of identical hashvalues to overflow spaceAllowed.
1060 : : * Increasing nbatch will not fix it since there's no way to subdivide the
1061 : : * group any more finely. We have to just gut it out and hope the server
1062 : : * has enough RAM.
1063 : : */
6979 tgl@sss.pgh.pa.us 1064 [ + - + + ]: 99 : if (nfreed == 0 || nfreed == ninmemory)
1065 : : {
1066 : 24 : hashtable->growEnabled = false;
1067 : : #ifdef HJDEBUG
1068 : : printf("Hashjoin %p: disabling further increase of nbatch\n",
1069 : : hashtable);
1070 : : #endif
1071 : : }
1072 : : }
1073 : :
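A short worked example of the repartitioning above (illustrative): batch numbers come from hash bits, so doubling nbatch from 4 to 8 means a tuple currently in batch 1 either stays in batch 1 or moves to batch 5 (1 plus the old nbatch), depending on one additional hash bit; a tuple never moves to a lower-numbered batch, which is what the Assert(batchno > curbatch) on the dump-out path relies on. If every in-memory tuple shares a single hash value, they all land on the same side of that split, nfreed ends up as 0 or ninmemory, and growEnabled is cleared exactly as the comment above describes.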
1074 : : /*
1075 : : * ExecParallelHashIncreaseNumBatches
1076 : : * Every participant attached to grow_batches_barrier must run this
1077 : : * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
1078 : : */
1079 : : static void
2307 andres@anarazel.de 1080 : 51 : ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
1081 : : {
1082 : 51 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1083 : :
388 tmunro@postgresql.or 1084 [ - + ]: 51 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1085 : :
1086 : : /*
1087 : : * It's unlikely, but we need to be prepared for new participants to show
1088 : : * up while we're in the middle of this operation, so we need to switch on
1089 : : * the barrier phase here.
1090 : : */
2307 andres@anarazel.de 1091 [ + + + - - - ]: 51 : switch (PHJ_GROW_BATCHES_PHASE(BarrierPhase(&pstate->grow_batches_barrier)))
1092 : : {
388 tmunro@postgresql.or 1093 : 47 : case PHJ_GROW_BATCHES_ELECT:
1094 : :
1095 : : /*
1096 : : * Elect one participant to prepare to grow the number of batches.
1097 : : * This involves reallocating or resetting the buckets of batch 0
1098 : : * in preparation for all participants to begin repartitioning the
1099 : : * tuples.
1100 : : */
2307 andres@anarazel.de 1101 [ + + ]: 47 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1102 : : WAIT_EVENT_HASH_GROW_BATCHES_ELECT))
1103 : : {
1104 : : dsa_pointer_atomic *buckets;
1105 : : ParallelHashJoinBatch *old_batch0;
1106 : : int new_nbatch;
1107 : : int i;
1108 : :
1109 : : /* Move the old batch out of the way. */
1110 : 36 : old_batch0 = hashtable->batches[0].shared;
1111 : 36 : pstate->old_batches = pstate->batches;
1112 : 36 : pstate->old_nbatch = hashtable->nbatch;
1113 : 36 : pstate->batches = InvalidDsaPointer;
1114 : :
1115 : : /* Free this backend's old accessors. */
1116 : 36 : ExecParallelHashCloseBatchAccessors(hashtable);
1117 : :
1118 : : /* Figure out how many batches to use. */
1119 [ + + ]: 36 : if (hashtable->nbatch == 1)
1120 : : {
1121 : : /*
1122 : : * We are going from single-batch to multi-batch. We need
1123 : : * to switch from one large combined memory budget to the
1124 : : * regular hash_mem budget.
1125 : : */
994 tgl@sss.pgh.pa.us 1126 : 18 : pstate->space_allowed = get_hash_memory_limit();
1127 : :
1128 : : /*
1129 : : * The combined hash_mem of all participants wasn't
1130 : : * enough. Therefore one batch per participant would be
1131 : : * approximately equivalent and would probably also be
1132 : : * insufficient. So try two batches per participant,
1133 : : * rounded up to a power of two.
1134 : : */
1135 : 18 : new_nbatch = pg_nextpower2_32(pstate->nparticipants * 2);
1136 : : }
1137 : : else
1138 : : {
1139 : : /*
1140 : : * We were already multi-batched. Try doubling the number
1141 : : * of batches.
1142 : : */
2307 andres@anarazel.de 1143 : 18 : new_nbatch = hashtable->nbatch * 2;
1144 : : }
1145 : :
1146 : : /* Allocate new larger generation of batches. */
1147 [ - + ]: 36 : Assert(hashtable->nbatch == pstate->nbatch);
1148 : 36 : ExecParallelHashJoinSetUpBatches(hashtable, new_nbatch);
1149 [ - + ]: 36 : Assert(hashtable->nbatch == pstate->nbatch);
1150 : :
1151 : : /* Replace or recycle batch 0's bucket array. */
1152 [ + + ]: 36 : if (pstate->old_nbatch == 1)
1153 : : {
1154 : : double dtuples;
1155 : : double dbuckets;
1156 : : int new_nbuckets;
1157 : : uint32 max_buckets;
1158 : :
1159 : : /*
1160 : : * We probably also need a smaller bucket array. How many
1161 : : * tuples do we expect per batch, assuming we have only
1162 : : * half of them so far? Normally we don't need to change
1163 : : * the bucket array's size, because the size of each batch
1164 : : * stays the same as we add more batches, but in this
1165 : : * special case we move from a large batch to many smaller
1166 : : * batches and it would be wasteful to keep the large
1167 : : * array.
1168 : : */
1169 : 18 : dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
1170 : :
1171 : : /*
1172 : : * We need to calculate the maximum number of buckets to
1173 : : * stay within the MaxAllocSize boundary. Round the
1174 : : * maximum number to the previous power of 2 given that
1175 : : * later we round the number to the next power of 2.
1176 : : */
98 akorotkov@postgresql 1177 : 18 : max_buckets = pg_prevpower2_32((uint32)
1178 : : (MaxAllocSize / sizeof(dsa_pointer_atomic)));
2307 andres@anarazel.de 1179 : 18 : dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
98 akorotkov@postgresql 1180 [ + - ]: 18 : dbuckets = Min(dbuckets, max_buckets);
2307 andres@anarazel.de 1181 : 18 : new_nbuckets = (int) dbuckets;
1182 : 18 : new_nbuckets = Max(new_nbuckets, 1024);
994 tgl@sss.pgh.pa.us 1183 : 18 : new_nbuckets = pg_nextpower2_32(new_nbuckets);
2307 andres@anarazel.de 1184 : 18 : dsa_free(hashtable->area, old_batch0->buckets);
1185 : 36 : hashtable->batches[0].shared->buckets =
1186 : 18 : dsa_allocate(hashtable->area,
1187 : : sizeof(dsa_pointer_atomic) * new_nbuckets);
1188 : : buckets = (dsa_pointer_atomic *)
1189 : 18 : dsa_get_address(hashtable->area,
1190 : 18 : hashtable->batches[0].shared->buckets);
1191 [ + + ]: 51218 : for (i = 0; i < new_nbuckets; ++i)
1192 : 51200 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1193 : 18 : pstate->nbuckets = new_nbuckets;
1194 : : }
1195 : : else
1196 : : {
1197 : : /* Recycle the existing bucket array. */
1198 : 18 : hashtable->batches[0].shared->buckets = old_batch0->buckets;
1199 : : buckets = (dsa_pointer_atomic *)
1200 : 18 : dsa_get_address(hashtable->area, old_batch0->buckets);
1201 [ + + ]: 49170 : for (i = 0; i < hashtable->nbuckets; ++i)
1202 : 49152 : dsa_pointer_atomic_write(&buckets[i], InvalidDsaPointer);
1203 : : }
1204 : :
1205 : : /* Move all chunks to the work queue for parallel processing. */
1206 : 36 : pstate->chunk_work_queue = old_batch0->chunks;
1207 : :
1208 : : /* Disable further growth temporarily while we're growing. */
1209 : 36 : pstate->growth = PHJ_GROWTH_DISABLED;
1210 : : }
1211 : : else
1212 : : {
1213 : : /* All other participants just flush their tuples to disk. */
1214 : 11 : ExecParallelHashCloseBatchAccessors(hashtable);
1215 : : }
1216 : : /* Fall through. */
1217 : :
1218 : : case PHJ_GROW_BATCHES_REALLOCATE:
1219 : : /* Wait for the above to be finished. */
1220 : 48 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1221 : : WAIT_EVENT_HASH_GROW_BATCHES_REALLOCATE);
1222 : : /* Fall through. */
1223 : :
388 tmunro@postgresql.or 1224 : 51 : case PHJ_GROW_BATCHES_REPARTITION:
1225 : : /* Make sure that we have the current dimensions and buckets. */
2307 andres@anarazel.de 1226 : 51 : ExecParallelHashEnsureBatchAccessors(hashtable);
1227 : 51 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1228 : : /* Then partition, flush counters. */
1229 : 51 : ExecParallelHashRepartitionFirst(hashtable);
1230 : 51 : ExecParallelHashRepartitionRest(hashtable);
1231 : 51 : ExecParallelHashMergeCounters(hashtable);
1232 : : /* Wait for the above to be finished. */
1233 : 51 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1234 : : WAIT_EVENT_HASH_GROW_BATCHES_REPARTITION);
1235 : : /* Fall through. */
1236 : :
388 tmunro@postgresql.or 1237 : 51 : case PHJ_GROW_BATCHES_DECIDE:
1238 : :
1239 : : /*
1240 : : * Elect one participant to clean up and decide whether further
1241 : : * repartitioning is needed, or should be disabled because it's
1242 : : * not helping.
1243 : : */
2307 andres@anarazel.de 1244 [ + + ]: 51 : if (BarrierArriveAndWait(&pstate->grow_batches_barrier,
1245 : : WAIT_EVENT_HASH_GROW_BATCHES_DECIDE))
1246 : : {
1247 : 36 : bool space_exhausted = false;
1248 : 36 : bool extreme_skew_detected = false;
1249 : :
1250 : : /* Make sure that we have the current dimensions and buckets. */
1251 : 36 : ExecParallelHashEnsureBatchAccessors(hashtable);
1252 : 36 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1253 : :
1254 : : /* Are any of the new generation of batches exhausted? */
599 drowley@postgresql.o 1255 [ + + ]: 372 : for (int i = 0; i < hashtable->nbatch; ++i)
1256 : : {
2307 andres@anarazel.de 1257 : 336 : ParallelHashJoinBatch *batch = hashtable->batches[i].shared;
1258 : :
1259 [ + - ]: 336 : if (batch->space_exhausted ||
1260 [ + + ]: 336 : batch->estimated_size > pstate->space_allowed)
1261 : : {
1262 : : int parent;
1263 : :
1264 : 12 : space_exhausted = true;
1265 : :
1266 : : /*
1267 : : * Did this batch receive ALL of the tuples from its
1268 : : * parent batch? That would indicate that further
1269 : : * repartitioning isn't going to help (the hash values
1270 : : * are probably all the same).
1271 : : */
1272 : 12 : parent = i % pstate->old_nbatch;
1273 [ + - ]: 12 : if (batch->ntuples == hashtable->batches[parent].shared->old_ntuples)
1274 : 12 : extreme_skew_detected = true;
1275 : : }
1276 : : }
1277 : :
1278 : : /* Don't keep growing if it's not helping or we'd overflow. */
1279 [ + + - + ]: 36 : if (extreme_skew_detected || hashtable->nbatch >= INT_MAX / 2)
1280 : 12 : pstate->growth = PHJ_GROWTH_DISABLED;
1281 [ - + ]: 24 : else if (space_exhausted)
2307 andres@anarazel.de 1282 :UBC 0 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
1283 : : else
2307 andres@anarazel.de 1284 :CBC 24 : pstate->growth = PHJ_GROWTH_OK;
1285 : :
1286 : : /* Free the old batches in shared memory. */
1287 : 36 : dsa_free(hashtable->area, pstate->old_batches);
1288 : 36 : pstate->old_batches = InvalidDsaPointer;
1289 : : }
1290 : : /* Fall through. */
1291 : :
1292 : : case PHJ_GROW_BATCHES_FINISH:
1293 : : /* Wait for the above to complete. */
1294 : 51 : BarrierArriveAndWait(&pstate->grow_batches_barrier,
1295 : : WAIT_EVENT_HASH_GROW_BATCHES_FINISH);
1296 : : }
1297 : 51 : }
1298 : :
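A worked example of the growth decisions above (numbers invented): with nparticipants = 3, moving from one batch to many gives new_nbatch = pg_nextpower2_32(3 * 2) = 8. If batch 0 held 100,000 tuples when memory filled (taken as roughly half of the final total), dtuples = (100,000 * 2.0) / 8 = 25,000, so the replacement bucket array is sized to new_nbuckets = pg_nextpower2_32(25,000) = 32,768, clamped below by 1024 and above by the MaxAllocSize-derived limit.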
1299 : : /*
1300 : : * Repartition the tuples currently loaded into memory for inner batch 0
1301 : : * because the number of batches has been increased. Some tuples are retained
1302 : : * in memory and some are written out to a later batch.
1303 : : */
1304 : : static void
1305 : 51 : ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
1306 : : {
1307 : : dsa_pointer chunk_shared;
1308 : : HashMemoryChunk chunk;
1309 : :
2303 1310 [ - + ]: 51 : Assert(hashtable->nbatch == hashtable->parallel_state->nbatch);
1311 : :
2307 1312 [ + + ]: 269 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_shared)))
1313 : : {
1314 : 167 : size_t idx = 0;
1315 : :
1316 : : /* Repartition all tuples in this chunk. */
1317 [ + + ]: 122244 : while (idx < chunk->used)
1318 : : {
2294 tgl@sss.pgh.pa.us 1319 : 122077 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
2307 andres@anarazel.de 1320 : 122077 : MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
1321 : : HashJoinTuple copyTuple;
1322 : : dsa_pointer shared;
1323 : : int bucketno;
1324 : : int batchno;
1325 : :
1326 : 122077 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1327 : : &bucketno, &batchno);
1328 : :
1329 [ - + ]: 122077 : Assert(batchno < hashtable->nbatch);
1330 [ + + ]: 122077 : if (batchno == 0)
1331 : : {
1332 : : /* It still belongs in batch 0. Copy to a new chunk. */
1333 : : copyTuple =
1334 : 30820 : ExecParallelHashTupleAlloc(hashtable,
1335 : 30820 : HJTUPLE_OVERHEAD + tuple->t_len,
1336 : : &shared);
1337 : 30820 : copyTuple->hashvalue = hashTuple->hashvalue;
1338 : 30820 : memcpy(HJTUPLE_MINTUPLE(copyTuple), tuple, tuple->t_len);
1339 : 30820 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1340 : : copyTuple, shared);
1341 : : }
1342 : : else
1343 : : {
1344 : 91257 : size_t tuple_size =
331 tgl@sss.pgh.pa.us 1345 : 91257 : MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1346 : :
1347 : : /* It belongs in a later batch. */
2307 andres@anarazel.de 1348 : 91257 : hashtable->batches[batchno].estimated_size += tuple_size;
1349 : 91257 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1350 : 91257 : &hashTuple->hashvalue, tuple);
1351 : : }
1352 : :
1353 : : /* Count this tuple. */
1354 : 122077 : ++hashtable->batches[0].old_ntuples;
1355 : 122077 : ++hashtable->batches[batchno].ntuples;
1356 : :
1357 : 122077 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1358 : : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1359 : : }
1360 : :
1361 : : /* Free this chunk. */
1362 : 167 : dsa_free(hashtable->area, chunk_shared);
1363 : :
1364 [ - + ]: 167 : CHECK_FOR_INTERRUPTS();
1365 : : }
1366 : 51 : }
1367 : :
1368 : : /*
1369 : : * Help repartition inner batches 1..n.
1370 : : */
1371 : : static void
1372 : 51 : ExecParallelHashRepartitionRest(HashJoinTable hashtable)
1373 : : {
1374 : 51 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1375 : 51 : int old_nbatch = pstate->old_nbatch;
1376 : : SharedTuplestoreAccessor **old_inner_tuples;
1377 : : ParallelHashJoinBatch *old_batches;
1378 : : int i;
1379 : :
1380 : : /* Get our hands on the previous generation of batches. */
1381 : : old_batches = (ParallelHashJoinBatch *)
1382 : 51 : dsa_get_address(hashtable->area, pstate->old_batches);
580 peter@eisentraut.org 1383 : 51 : old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
2307 andres@anarazel.de 1384 [ + + ]: 171 : for (i = 1; i < old_nbatch; ++i)
1385 : : {
1386 : 120 : ParallelHashJoinBatch *shared =
331 tgl@sss.pgh.pa.us 1387 : 120 : NthParallelHashJoinBatch(old_batches, i);
1388 : :
2307 andres@anarazel.de 1389 : 120 : old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
1390 : : ParallelWorkerNumber + 1,
1391 : : &pstate->fileset);
1392 : : }
1393 : :
1394 : : /* Join in the effort to repartition them. */
1395 [ + + ]: 171 : for (i = 1; i < old_nbatch; ++i)
1396 : : {
1397 : : MinimalTuple tuple;
1398 : : uint32 hashvalue;
1399 : :
1400 : : /* Scan one partition from the previous generation. */
1401 : 120 : sts_begin_parallel_scan(old_inner_tuples[i]);
1402 [ + + ]: 177547 : while ((tuple = sts_parallel_scan_next(old_inner_tuples[i], &hashvalue)))
1403 : : {
1404 : 177427 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1405 : : int bucketno;
1406 : : int batchno;
1407 : :
1408 : : /* Decide which partition it goes to in the new generation. */
1409 : 177427 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno,
1410 : : &batchno);
1411 : :
1412 : 177427 : hashtable->batches[batchno].estimated_size += tuple_size;
1413 : 177427 : ++hashtable->batches[batchno].ntuples;
1414 : 177427 : ++hashtable->batches[i].old_ntuples;
1415 : :
1416 : : /* Store the tuple in its new batch. */
1417 : 177427 : sts_puttuple(hashtable->batches[batchno].inner_tuples,
1418 : : &hashvalue, tuple);
1419 : :
1420 [ - + ]: 177427 : CHECK_FOR_INTERRUPTS();
1421 : : }
1422 : 120 : sts_end_parallel_scan(old_inner_tuples[i]);
1423 : : }
1424 : :
1425 : 51 : pfree(old_inner_tuples);
1426 : 51 : }
1427 : :
1428 : : /*
1429 : : * Transfer the backend-local per-batch counters to the shared totals.
1430 : : */
1431 : : static void
1432 : 228 : ExecParallelHashMergeCounters(HashJoinTable hashtable)
1433 : : {
1434 : 228 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1435 : : int i;
1436 : :
1437 : 228 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
1438 : 228 : pstate->total_tuples = 0;
1439 [ + + ]: 1524 : for (i = 0; i < hashtable->nbatch; ++i)
1440 : : {
1441 : 1296 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[i];
1442 : :
1443 : 1296 : batch->shared->size += batch->size;
1444 : 1296 : batch->shared->estimated_size += batch->estimated_size;
1445 : 1296 : batch->shared->ntuples += batch->ntuples;
1446 : 1296 : batch->shared->old_ntuples += batch->old_ntuples;
1447 : 1296 : batch->size = 0;
1448 : 1296 : batch->estimated_size = 0;
1449 : 1296 : batch->ntuples = 0;
1450 : 1296 : batch->old_ntuples = 0;
1451 : 1296 : pstate->total_tuples += batch->shared->ntuples;
1452 : : }
1453 : 228 : LWLockRelease(&pstate->lock);
1454 : 228 : }
1455 : :
1456 : : /*
1457 : : * ExecHashIncreaseNumBuckets
1458 : : * increase the original number of buckets in order to reduce
1459 : : * the number of tuples per bucket
1460 : : */
1461 : : static void
3471 kgrittn@postgresql.o 1462 : 57 : ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
1463 : : {
1464 : : HashMemoryChunk chunk;
1465 : :
1466 : : /* do nothing if not an increase (it's called increase for a reason) */
1467 [ - + ]: 57 : if (hashtable->nbuckets >= hashtable->nbuckets_optimal)
3471 kgrittn@postgresql.o 1468 :UBC 0 : return;
1469 : :
1470 : : #ifdef HJDEBUG
1471 : : printf("Hashjoin %p: increasing nbuckets %d => %d\n",
1472 : : hashtable, hashtable->nbuckets, hashtable->nbuckets_optimal);
1473 : : #endif
1474 : :
3471 kgrittn@postgresql.o 1475 :CBC 57 : hashtable->nbuckets = hashtable->nbuckets_optimal;
3115 tgl@sss.pgh.pa.us 1476 : 57 : hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
1477 : :
3471 kgrittn@postgresql.o 1478 [ - + ]: 57 : Assert(hashtable->nbuckets > 1);
1479 [ - + ]: 57 : Assert(hashtable->nbuckets <= (INT_MAX / 2));
1480 [ - + ]: 57 : Assert(hashtable->nbuckets == (1 << hashtable->log2_nbuckets));
1481 : :
1482 : : /*
1483 : : * Just reallocate the proper number of buckets - we don't need to walk
1484 : : * through them - we can walk the dense-allocated chunks (just like in
1485 : : * ExecHashIncreaseNumBatches, but without all the copying into new
1486 : : * chunks)
1487 : : */
2307 andres@anarazel.de 1488 : 57 : hashtable->buckets.unshared =
580 peter@eisentraut.org 1489 : 57 : repalloc_array(hashtable->buckets.unshared,
1490 : : HashJoinTuple, hashtable->nbuckets);
1491 : :
2307 andres@anarazel.de 1492 : 57 : memset(hashtable->buckets.unshared, 0,
1493 : 57 : hashtable->nbuckets * sizeof(HashJoinTuple));
1494 : :
1495 : : /* scan through all tuples in all chunks to rebuild the hash table */
1496 [ + + ]: 600 : for (chunk = hashtable->chunks; chunk != NULL; chunk = chunk->next.unshared)
1497 : : {
1498 : : /* process all tuples stored in this chunk */
3249 bruce@momjian.us 1499 : 543 : size_t idx = 0;
1500 : :
3471 kgrittn@postgresql.o 1501 [ + + ]: 401369 : while (idx < chunk->used)
1502 : : {
2294 tgl@sss.pgh.pa.us 1503 : 400826 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
1504 : : int bucketno;
1505 : : int batchno;
1506 : :
3471 kgrittn@postgresql.o 1507 : 400826 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1508 : : &bucketno, &batchno);
1509 : :
1510 : : /* add the tuple to the proper bucket */
2307 andres@anarazel.de 1511 : 400826 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1512 : 400826 : hashtable->buckets.unshared[bucketno] = hashTuple;
1513 : :
1514 : : /* advance index past the tuple */
3471 kgrittn@postgresql.o 1515 : 400826 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1516 : : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1517 : : }
1518 : :
1519 : : /* allow this loop to be cancellable */
2455 andres@anarazel.de 1520 [ - + ]: 543 : CHECK_FOR_INTERRUPTS();
1521 : : }
1522 : : }
1523 : :
1524 : : static void
2307 1525 : 57 : ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
1526 : : {
1527 : 57 : ParallelHashJoinState *pstate = hashtable->parallel_state;
1528 : : int i;
1529 : : HashMemoryChunk chunk;
1530 : : dsa_pointer chunk_s;
1531 : :
388 tmunro@postgresql.or 1532 [ - + ]: 57 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
1533 : :
1534 : : /*
1535 : : * It's unlikely, but we need to be prepared for new participants to show
1536 : : * up while we're in the middle of this operation, so we need to switch on
1537 : : * the barrier phase here.
1538 : : */
2307 andres@anarazel.de 1539 [ + - + - ]: 57 : switch (PHJ_GROW_BUCKETS_PHASE(BarrierPhase(&pstate->grow_buckets_barrier)))
1540 : : {
388 tmunro@postgresql.or 1541 : 56 : case PHJ_GROW_BUCKETS_ELECT:
1542 : : /* Elect one participant to prepare to increase nbuckets. */
2307 andres@anarazel.de 1543 [ + + ]: 56 : if (BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1544 : : WAIT_EVENT_HASH_GROW_BUCKETS_ELECT))
1545 : : {
1546 : : size_t size;
1547 : : dsa_pointer_atomic *buckets;
1548 : :
1549 : : /* Double the size of the bucket array. */
1550 : 52 : pstate->nbuckets *= 2;
1551 : 52 : size = pstate->nbuckets * sizeof(dsa_pointer_atomic);
1552 : 52 : hashtable->batches[0].shared->size += size / 2;
1553 : 52 : dsa_free(hashtable->area, hashtable->batches[0].shared->buckets);
1554 : 104 : hashtable->batches[0].shared->buckets =
1555 : 52 : dsa_allocate(hashtable->area, size);
1556 : : buckets = (dsa_pointer_atomic *)
1557 : 52 : dsa_get_address(hashtable->area,
1558 : 52 : hashtable->batches[0].shared->buckets);
1559 [ + + ]: 450612 : for (i = 0; i < pstate->nbuckets; ++i)
1560 : 450560 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
1561 : :
1562 : : /* Put the chunk list onto the work queue. */
1563 : 52 : pstate->chunk_work_queue = hashtable->batches[0].shared->chunks;
1564 : :
1565 : : /* Clear the flag. */
1566 : 52 : pstate->growth = PHJ_GROWTH_OK;
1567 : : }
1568 : : /* Fall through. */
1569 : :
1570 : : case PHJ_GROW_BUCKETS_REALLOCATE:
1571 : : /* Wait for the above to complete. */
1572 : 56 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1573 : : WAIT_EVENT_HASH_GROW_BUCKETS_REALLOCATE);
1574 : : /* Fall through. */
1575 : :
388 tmunro@postgresql.or 1576 : 57 : case PHJ_GROW_BUCKETS_REINSERT:
1577 : : /* Reinsert all tuples into the hash table. */
2307 andres@anarazel.de 1578 : 57 : ExecParallelHashEnsureBatchAccessors(hashtable);
1579 : 57 : ExecParallelHashTableSetCurrentBatch(hashtable, 0);
1580 [ + + ]: 441 : while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s)))
1581 : : {
1582 : 327 : size_t idx = 0;
1583 : :
1584 [ + + ]: 262541 : while (idx < chunk->used)
1585 : : {
2294 tgl@sss.pgh.pa.us 1586 : 262214 : HashJoinTuple hashTuple = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + idx);
2307 andres@anarazel.de 1587 : 262214 : dsa_pointer shared = chunk_s + HASH_CHUNK_HEADER_SIZE + idx;
1588 : : int bucketno;
1589 : : int batchno;
1590 : :
1591 : 262214 : ExecHashGetBucketAndBatch(hashtable, hashTuple->hashvalue,
1592 : : &bucketno, &batchno);
1593 [ - + ]: 262214 : Assert(batchno == 0);
1594 : :
1595 : : /* add the tuple to the proper bucket */
1596 : 262214 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1597 : : hashTuple, shared);
1598 : :
1599 : : /* advance index past the tuple */
1600 : 262214 : idx += MAXALIGN(HJTUPLE_OVERHEAD +
1601 : : HJTUPLE_MINTUPLE(hashTuple)->t_len);
1602 : : }
1603 : :
1604 : : /* allow this loop to be cancellable */
1605 [ - + ]: 327 : CHECK_FOR_INTERRUPTS();
1606 : : }
1607 : 57 : BarrierArriveAndWait(&pstate->grow_buckets_barrier,
1608 : : WAIT_EVENT_HASH_GROW_BUCKETS_REINSERT);
1609 : : }
1610 : 57 : }
1611 : :
1612 : : /*
1613 : : * ExecHashTableInsert
1614 : : * insert a tuple into the hash table; depending on the hash value,
1615 : : * it may instead be written to a temp file for a later batch
1616 : : *
1617 : : * Note: the passed TupleTableSlot may contain a regular, minimal, or virtual
1618 : : * tuple; the minimal case in particular is certain to happen while reloading
1619 : : * tuples from batch files. We could save some cycles in the regular-tuple
1620 : : * case by not forcing the slot contents into minimal form; not clear if it's
1621 : : * worth the messiness required.
1622 : : */
1623 : : void
10141 scrappy@hub.org 1624 : 6195391 : ExecHashTableInsert(HashJoinTable hashtable,
1625 : : TupleTableSlot *slot,
1626 : : uint32 hashvalue)
1627 : : {
1628 : : bool shouldFree;
1977 andres@anarazel.de 1629 : 6195391 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1630 : : int bucketno;
1631 : : int batchno;
1632 : :
6979 tgl@sss.pgh.pa.us 1633 : 6195391 : ExecHashGetBucketAndBatch(hashtable, hashvalue,
1634 : : &bucketno, &batchno);
1635 : :
1636 : : /*
1637 : : * decide whether to put the tuple in the hash table or a temp file
1638 : : */
1639 [ + + ]: 6195391 : if (batchno == hashtable->curbatch)
1640 : : {
1641 : : /*
1642 : : * put the tuple in hash table
1643 : : */
1644 : : HashJoinTuple hashTuple;
1645 : : int hashTupleSize;
3471 kgrittn@postgresql.o 1646 : 4554721 : double ntuples = (hashtable->totalTuples - hashtable->skewTuples);
1647 : :
1648 : : /* Create the HashJoinTuple */
6501 tgl@sss.pgh.pa.us 1649 : 4554721 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
3504 heikki.linnakangas@i 1650 : 4554721 : hashTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
1651 : :
6979 tgl@sss.pgh.pa.us 1652 : 4554721 : hashTuple->hashvalue = hashvalue;
6501 1653 : 4554721 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1654 : :
1655 : : /*
1656 : : * We always reset the tuple-matched flag on insertion. This is okay
1657 : : * even when reloading a tuple from a batch file, since the tuple
1658 : : * could not possibly have been matched to an outer tuple before it
1659 : : * went into the batch file.
1660 : : */
4854 1661 : 4554721 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1662 : :
1663 : : /* Push it onto the front of the bucket's list */
2307 andres@anarazel.de 1664 : 4554721 : hashTuple->next.unshared = hashtable->buckets.unshared[bucketno];
1665 : 4554721 : hashtable->buckets.unshared[bucketno] = hashTuple;
1666 : :
1667 : : /*
1668 : : * Increase the (optimal) number of buckets if we just exceeded the
1669 : : * NTUP_PER_BUCKET threshold, but only when there's still a single
1670 : : * batch.
1671 : : */
3115 tgl@sss.pgh.pa.us 1672 [ + + ]: 4554721 : if (hashtable->nbatch == 1 &&
1673 [ + + ]: 2687278 : ntuples > (hashtable->nbuckets_optimal * NTUP_PER_BUCKET))
1674 : : {
1675 : : /* Guard against integer overflow and alloc size overflow */
1676 [ + - ]: 153 : if (hashtable->nbuckets_optimal <= INT_MAX / 2 &&
1677 [ + - ]: 153 : hashtable->nbuckets_optimal * 2 <= MaxAllocSize / sizeof(HashJoinTuple))
1678 : : {
1679 : 153 : hashtable->nbuckets_optimal *= 2;
1680 : 153 : hashtable->log2_nbuckets_optimal += 1;
1681 : : }
1682 : : }
1683 : :
1684 : : /* Account for space used, and back off if we've used too much */
6979 1685 : 4554721 : hashtable->spaceUsed += hashTupleSize;
5186 rhaas@postgresql.org 1686 [ + + ]: 4554721 : if (hashtable->spaceUsed > hashtable->spacePeak)
1687 : 3182689 : hashtable->spacePeak = hashtable->spaceUsed;
3471 kgrittn@postgresql.o 1688 : 4554721 : if (hashtable->spaceUsed +
1689 : 4554721 : hashtable->nbuckets_optimal * sizeof(HashJoinTuple)
3502 rhaas@postgresql.org 1690 [ + + ]: 4554721 : > hashtable->spaceAllowed)
6979 tgl@sss.pgh.pa.us 1691 : 414579 : ExecHashIncreaseNumBatches(hashtable);
1692 : : }
1693 : : else
1694 : : {
1695 : : /*
1696 : : * put the tuple into a temp file for later batches
1697 : : */
1698 [ - + ]: 1640670 : Assert(batchno > hashtable->curbatch);
6156 1699 : 1640670 : ExecHashJoinSaveTuple(tuple,
1700 : : hashvalue,
331 tomas.vondra@postgre 1701 : 1640670 : &hashtable->innerBatchFile[batchno],
1702 : : hashtable);
1703 : : }
1704 : :
1977 andres@anarazel.de 1705 [ + + ]: 6195391 : if (shouldFree)
1706 : 4365553 : heap_free_minimal_tuple(tuple);
10141 scrappy@hub.org 1707 : 6195391 : }
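/*
 * Added illustrative note (not from the original source): NTUP_PER_BUCKET
 * is defined as 1 in recent versions of this file, so with
 * nbuckets_optimal = 1024 the 1025th in-memory tuple of a single-batch
 * build bumps nbuckets_optimal to 2048 and log2_nbuckets_optimal from 10
 * to 11.  Only the target is updated here; the bucket array itself is
 * rebuilt later, in ExecHashIncreaseNumBuckets, by walking the
 * dense-allocated chunks.
 */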
1708 : :
1709 : : /*
1710 : : * ExecParallelHashTableInsert
1711 : : * insert a tuple into a shared hash table or shared batch tuplestore
1712 : : */
1713 : : void
2307 andres@anarazel.de 1714 : 1080063 : ExecParallelHashTableInsert(HashJoinTable hashtable,
1715 : : TupleTableSlot *slot,
1716 : : uint32 hashvalue)
1717 : : {
1718 : : bool shouldFree;
1977 1719 : 1080063 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1720 : : dsa_pointer shared;
1721 : : int bucketno;
1722 : : int batchno;
1723 : :
2307 1724 : 191 : retry:
1725 : 1080254 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1726 : :
1727 [ + + ]: 1080254 : if (batchno == 0)
1728 : : {
1729 : : HashJoinTuple hashTuple;
1730 : :
1731 : : /* Try to load it into memory. */
1732 [ - + ]: 622538 : Assert(BarrierPhase(&hashtable->parallel_state->build_barrier) ==
1733 : : PHJ_BUILD_HASH_INNER);
1734 : 622538 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1735 : 622538 : HJTUPLE_OVERHEAD + tuple->t_len,
1736 : : &shared);
1737 [ + + ]: 622538 : if (hashTuple == NULL)
1738 : 158 : goto retry;
1739 : :
1740 : : /* Store the hash value in the HashJoinTuple header. */
1741 : 622380 : hashTuple->hashvalue = hashvalue;
1742 : 622380 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
366 tmunro@postgresql.or 1743 : 622380 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1744 : :
1745 : : /* Push it onto the front of the bucket's list */
2307 andres@anarazel.de 1746 : 622380 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1747 : : hashTuple, shared);
1748 : : }
1749 : : else
1750 : : {
1751 : 457716 : size_t tuple_size = MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
1752 : :
1753 [ - + ]: 457716 : Assert(batchno > 0);
1754 : :
1755 : : /* Try to preallocate space in the batch if necessary. */
1756 [ + + ]: 457716 : if (hashtable->batches[batchno].preallocated < tuple_size)
1757 : : {
1758 [ + + ]: 1017 : if (!ExecParallelHashTuplePrealloc(hashtable, batchno, tuple_size))
1759 : 33 : goto retry;
1760 : : }
1761 : :
1762 [ - + ]: 457683 : Assert(hashtable->batches[batchno].preallocated >= tuple_size);
1763 : 457683 : hashtable->batches[batchno].preallocated -= tuple_size;
1764 : 457683 : sts_puttuple(hashtable->batches[batchno].inner_tuples, &hashvalue,
1765 : : tuple);
1766 : : }
1767 : 1080063 : ++hashtable->batches[batchno].ntuples;
1768 : :
1977 1769 [ + - ]: 1080063 : if (shouldFree)
1770 : 1080063 : heap_free_minimal_tuple(tuple);
2307 1771 : 1080063 : }
1772 : :
1773 : : /*
1774 : : * Insert a tuple into the current hash table. Unlike
1775 : : * ExecParallelHashTableInsert, this version is not prepared to send the tuple
1776 : : * to other batches or to run out of memory, and should only be called with
1777 : : * tuples that belong in the current batch once growth has been disabled.
1778 : : */
1779 : : void
1780 : 548940 : ExecParallelHashTableInsertCurrentBatch(HashJoinTable hashtable,
1781 : : TupleTableSlot *slot,
1782 : : uint32 hashvalue)
1783 : : {
1784 : : bool shouldFree;
1977 1785 : 548940 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
1786 : : HashJoinTuple hashTuple;
1787 : : dsa_pointer shared;
1788 : : int batchno;
1789 : : int bucketno;
1790 : :
2307 1791 : 548940 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
1792 [ - + ]: 548940 : Assert(batchno == hashtable->curbatch);
1793 : 548940 : hashTuple = ExecParallelHashTupleAlloc(hashtable,
1794 : 548940 : HJTUPLE_OVERHEAD + tuple->t_len,
1795 : : &shared);
1796 : 548940 : hashTuple->hashvalue = hashvalue;
1797 : 548940 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
1798 : 548940 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
1799 : 548940 : ExecParallelHashPushTuple(&hashtable->buckets.shared[bucketno],
1800 : : hashTuple, shared);
1801 : :
1977 1802 [ - + ]: 548940 : if (shouldFree)
1977 andres@anarazel.de 1803 :UBC 0 : heap_free_minimal_tuple(tuple);
2307 andres@anarazel.de 1804 :CBC 548940 : }
1805 : :
1806 : : /*
1807 : : * ExecHashGetHashValue
1808 : : * Compute the hash value for a tuple
1809 : : *
1810 : : * The tuple to be tested must be in econtext->ecxt_outertuple (thus Vars in
1811 : : * the hashkeys expressions need to have OUTER_VAR as varno). If outer_tuple
1812 : : * is false (meaning it's the HashJoin's inner node, Hash), econtext,
1813 : : * hashkeys, and slot need to be from Hash, with hashkeys/slot referencing and
1814 : : * being suitable for tuples from the node below the Hash. Conversely, if
1815 : : * outer_tuple is true, econtext is from HashJoin, and hashkeys/slot need to
1816 : : * be appropriate for tuples from HashJoin's outer node.
1817 : : *
1818 : : * A true result means the tuple's hash value has been successfully computed
1819 : : * and stored at *hashvalue. A false result means the tuple cannot match
1820 : : * because it contains a null attribute, and hence it should be discarded
1821 : : * immediately. (If keep_nulls is true then false is never returned.)
1822 : : */
1823 : : bool
6979 tgl@sss.pgh.pa.us 1824 : 13151943 : ExecHashGetHashValue(HashJoinTable hashtable,
1825 : : ExprContext *econtext,
1826 : : List *hashkeys,
1827 : : bool outer_tuple,
1828 : : bool keep_nulls,
1829 : : uint32 *hashvalue)
1830 : : {
7806 1831 : 13151943 : uint32 hashkey = 0;
1832 : : FmgrInfo *hashfunctions;
1833 : : ListCell *hk;
1834 : 13151943 : int i = 0;
1835 : : MemoryContext oldContext;
1836 : :
1837 : : /*
1838 : : * We reset the eval context each time to reclaim any memory leaked in the
1839 : : * hashkey expressions.
1840 : : */
8677 1841 : 13151943 : ResetExprContext(econtext);
1842 : :
8280 1843 : 13151943 : oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
1844 : :
6284 1845 [ + + ]: 13151943 : if (outer_tuple)
1846 : 7685681 : hashfunctions = hashtable->outer_hashfunctions;
1847 : : else
1848 : 5466262 : hashfunctions = hashtable->inner_hashfunctions;
1849 : :
7806 1850 [ + - + + : 27099261 : foreach(hk, hashkeys)
+ + ]
1851 : : {
7333 1852 : 13947734 : ExprState *keyexpr = (ExprState *) lfirst(hk);
1853 : : Datum keyval;
1854 : : bool isNull;
1855 : :
1856 : : /* combine successive hashkeys by rotating */
784 john.naylor@postgres 1857 : 13947734 : hashkey = pg_rotate_left32(hashkey, 1);
1858 : :
1859 : : /*
1860 : : * Get the join attribute value of the tuple
1861 : : */
2642 andres@anarazel.de 1862 : 13947734 : keyval = ExecEvalExpr(keyexpr, econtext, &isNull);
1863 : :
1864 : : /*
1865 : : * If the attribute is NULL, and the join operator is strict, then
1866 : : * this tuple cannot pass the join qual so we can reject it
1867 : : * immediately (unless we're scanning the outside of an outer join, in
1868 : : * which case we must not reject it). Otherwise we act like the
1869 : : * hashcode of NULL is zero (this will support operators that act like
1870 : : * IS NOT DISTINCT, though not any more-random behavior). We treat
1871 : : * the hash support function as strict even if the operator is not.
1872 : : *
1873 : : * Note: currently, all hashjoinable operators must be strict since
1874 : : * the hash index AM assumes that. However, it takes so little extra
1875 : : * code here to allow non-strict that we may as well do it.
1876 : : */
6286 tgl@sss.pgh.pa.us 1877 [ + + ]: 13947734 : if (isNull)
1878 : : {
1879 [ + - + + ]: 532 : if (hashtable->hashStrict[i] && !keep_nulls)
1880 : : {
6261 1881 : 416 : MemoryContextSwitchTo(oldContext);
5995 bruce@momjian.us 1882 : 416 : return false; /* cannot match */
1883 : : }
1884 : : /* else, leave hashkey unmodified, equivalent to hashcode 0 */
1885 : : }
1886 : : else
1887 : : {
1888 : : /* Compute the hash function */
1889 : : uint32 hkey;
1890 : :
1850 peter@eisentraut.org 1891 : 13947202 : hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i], hashtable->collations[i], keyval));
7602 tgl@sss.pgh.pa.us 1892 : 13947202 : hashkey ^= hkey;
1893 : : }
1894 : :
7806 1895 : 13947318 : i++;
1896 : : }
1897 : :
8280 1898 : 13151527 : MemoryContextSwitchTo(oldContext);
1899 : :
6286 1900 : 13151527 : *hashvalue = hashkey;
1901 : 13151527 : return true;
1902 : : }
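/*
 * Added illustrative sketch (not part of the original source): the loop
 * above rotates the accumulated hash left by one bit before XORing in each
 * column's hash, so for a two-column key the result is rot1(h1) ^ h2 (the
 * initial value is zero, so the first column passes through unchanged).
 * Unlike a plain XOR, this makes the combined value depend on column order.
 * The standalone helper below models just that combining step; rotl32 is a
 * local stand-in for pg_rotate_left32, and a NULL column would simply
 * contribute a zero hash.
 */
#include <stdint.h>

static uint32_t
rotl32(uint32_t x, int n)		/* assumes 0 < n < 32 */
{
	return (x << n) | (x >> (32 - n));
}

static uint32_t
combine_key_hashes(const uint32_t *keyhashes, int nkeys)
{
	uint32_t	hashkey = 0;

	for (int i = 0; i < nkeys; i++)
	{
		hashkey = rotl32(hashkey, 1);	/* rotate the running value ... */
		hashkey ^= keyhashes[i];		/* ... then mix in this column */
	}
	return hashkey;
}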
1903 : :
1904 : : /*
1905 : : * ExecHashGetBucketAndBatch
1906 : : * Determine the bucket number and batch number for a hash value
1907 : : *
1908 : : * Note: on-the-fly increases of nbatch must not change the bucket number
1909 : : * for a given hash code (since we don't move tuples to different hash
1910 : : * chains), and must only cause the batch number to remain the same or
1911 : : * increase. Our algorithm is
1912 : : * bucketno = hashvalue MOD nbuckets
1913 : : * batchno = ROR(hashvalue, log2_nbuckets) MOD nbatch
1914 : : * where nbuckets and nbatch are both expected to be powers of 2, so we can
1915 : : * do the computations by shifting and masking. (This assumes that all hash
1916 : : * functions are good about randomizing all their output bits, else we are
1917 : : * likely to have very skewed bucket or batch occupancy.)
1918 : : *
1919 : : * nbuckets and log2_nbuckets may change while nbatch == 1 because of dynamic
1920 : : * bucket count growth. Once we start batching, the value is fixed and does
1921 : : * not change over the course of the join (making it possible to compute batch
1922 : : * number the way we do here).
1923 : : *
1924 : : * nbatch is always a power of 2; we increase it only by doubling it. This
1925 : : * effectively adds one more bit to the top of the batchno. In very large
1926 : : * joins, we might run out of bits to add, so we do this by rotating the hash
1927 : : * value. This causes batchno to steal bits from bucketno when the number of
1928 : : * virtual buckets exceeds 2^32. It's better to have longer bucket chains
1929 : : * than to lose the ability to divide batches.
1930 : : */
1931 : : void
6979 1932 : 18077715 : ExecHashGetBucketAndBatch(HashJoinTable hashtable,
1933 : : uint32 hashvalue,
1934 : : int *bucketno,
1935 : : int *batchno)
1936 : : {
6756 bruce@momjian.us 1937 : 18077715 : uint32 nbuckets = (uint32) hashtable->nbuckets;
1938 : 18077715 : uint32 nbatch = (uint32) hashtable->nbatch;
1939 : :
6979 tgl@sss.pgh.pa.us 1940 [ + + ]: 18077715 : if (nbatch > 1)
1941 : : {
6162 1942 : 8008493 : *bucketno = hashvalue & (nbuckets - 1);
1573 tmunro@postgresql.or 1943 : 8008493 : *batchno = pg_rotate_right32(hashvalue,
1944 : 8008493 : hashtable->log2_nbuckets) & (nbatch - 1);
1945 : : }
1946 : : else
1947 : : {
6162 tgl@sss.pgh.pa.us 1948 : 10069222 : *bucketno = hashvalue & (nbuckets - 1);
6979 1949 : 10069222 : *batchno = 0;
1950 : : }
7776 1951 : 18077715 : }
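/*
 * Added illustrative sketch (not part of the original source): a standalone
 * model of the computation above, assuming nbuckets and nbatch are powers
 * of 2.  With log2_nbuckets = 10 (nbuckets = 1024) and nbatch = 4, the hash
 * value 0xDEADBEEF gives bucketno = 0xDEADBEEF & 0x3FF = 751 and
 * batchno = ROR32(0xDEADBEEF, 10) & 3 = 3.  rotr32 is a local stand-in for
 * pg_rotate_right32.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
rotr32(uint32_t x, int n)		/* assumes 0 < n < 32 */
{
	return (x >> n) | (x << (32 - n));
}

static void
bucket_and_batch(uint32_t hashvalue, uint32_t nbuckets, int log2_nbuckets,
				 uint32_t nbatch, uint32_t *bucketno, uint32_t *batchno)
{
	*bucketno = hashvalue & (nbuckets - 1);
	if (nbatch > 1)
		*batchno = rotr32(hashvalue, log2_nbuckets) & (nbatch - 1);
	else
		*batchno = 0;
}

int
main(void)
{
	uint32_t	bucketno;
	uint32_t	batchno;

	bucket_and_batch(0xDEADBEEF, 1024, 10, 4, &bucketno, &batchno);
	/* prints bucketno=751 batchno=3 */
	printf("bucketno=%" PRIu32 " batchno=%" PRIu32 "\n", bucketno, batchno);
	return 0;
}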
1952 : :
1953 : : /*
1954 : : * ExecScanHashBucket
1955 : : * scan a hash bucket for matches to the current outer tuple
1956 : : *
1957 : : * The current outer tuple must be stored in econtext->ecxt_outertuple.
1958 : : *
1959 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
1960 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
1961 : : * for the latter.
1962 : : */
1963 : : bool
9715 bruce@momjian.us 1964 : 8699063 : ExecScanHashBucket(HashJoinState *hjstate,
1965 : : ExprContext *econtext)
1966 : : {
2588 andres@anarazel.de 1967 : 8699063 : ExprState *hjclauses = hjstate->hashclauses;
9091 bruce@momjian.us 1968 : 8699063 : HashJoinTable hashtable = hjstate->hj_HashTable;
1969 : 8699063 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
6979 tgl@sss.pgh.pa.us 1970 : 8699063 : uint32 hashvalue = hjstate->hj_CurHashValue;
1971 : :
1972 : : /*
1973 : : * hj_CurTuple is the address of the tuple last returned from the current
1974 : : * bucket, or NULL if it's time to start scanning a new bucket.
1975 : : *
1976 : : * If the tuple hashed to a skew bucket then scan the skew bucket
1977 : : * otherwise scan the standard hashtable bucket.
1978 : : */
5503 1979 [ + + ]: 8699063 : if (hashTuple != NULL)
2307 andres@anarazel.de 1980 : 2093804 : hashTuple = hashTuple->next.unshared;
5503 tgl@sss.pgh.pa.us 1981 [ + + ]: 6605259 : else if (hjstate->hj_CurSkewBucketNo != INVALID_SKEW_BUCKET_NO)
1982 : 1200 : hashTuple = hashtable->skewBucket[hjstate->hj_CurSkewBucketNo]->tuples;
1983 : : else
2307 andres@anarazel.de 1984 : 6604059 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
1985 : :
1986 [ + + ]: 10723537 : while (hashTuple != NULL)
1987 : : {
1988 [ + + ]: 5859787 : if (hashTuple->hashvalue == hashvalue)
1989 : : {
1990 : : TupleTableSlot *inntuple;
1991 : :
1992 : : /* insert hashtable's tuple into exec slot so ExecQual sees it */
1993 : 3835316 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
1994 : : hjstate->hj_HashTupleSlot,
1995 : : false); /* do not pfree */
1996 : 3835316 : econtext->ecxt_innertuple = inntuple;
1997 : :
2267 1998 [ + + ]: 3835316 : if (ExecQualAndReset(hjclauses, econtext))
1999 : : {
2307 2000 : 3835313 : hjstate->hj_CurTuple = hashTuple;
2001 : 3835313 : return true;
2002 : : }
2003 : : }
2004 : :
2005 : 2024474 : hashTuple = hashTuple->next.unshared;
2006 : : }
2007 : :
2008 : : /*
2009 : : * no match
2010 : : */
2011 : 4863750 : return false;
2012 : : }
2013 : :
2014 : : /*
2015 : : * ExecParallelScanHashBucket
2016 : : * scan a hash bucket for matches to the current outer tuple
2017 : : *
2018 : : * The current outer tuple must be stored in econtext->ecxt_outertuple.
2019 : : *
2020 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2021 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2022 : : * for the latter.
2023 : : */
2024 : : bool
2025 : 2100027 : ExecParallelScanHashBucket(HashJoinState *hjstate,
2026 : : ExprContext *econtext)
2027 : : {
2028 : 2100027 : ExprState *hjclauses = hjstate->hashclauses;
2029 : 2100027 : HashJoinTable hashtable = hjstate->hj_HashTable;
2030 : 2100027 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2031 : 2100027 : uint32 hashvalue = hjstate->hj_CurHashValue;
2032 : :
2033 : : /*
2034 : : * hj_CurTuple is the address of the tuple last returned from the current
2035 : : * bucket, or NULL if it's time to start scanning a new bucket.
2036 : : */
2037 [ + + ]: 2100027 : if (hashTuple != NULL)
2038 : 1020012 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2039 : : else
2040 : 1080015 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2041 : : hjstate->hj_CurBucketNo);
2042 : :
9098 tgl@sss.pgh.pa.us 2043 [ + + ]: 2805397 : while (hashTuple != NULL)
2044 : : {
6979 2045 [ + + ]: 1725382 : if (hashTuple->hashvalue == hashvalue)
2046 : : {
2047 : : TupleTableSlot *inntuple;
2048 : :
2049 : : /* insert hashtable's tuple into exec slot so ExecQual sees it */
6501 2050 : 1020012 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2051 : : hjstate->hj_HashTupleSlot,
2052 : : false); /* do not pfree */
6979 2053 : 1020012 : econtext->ecxt_innertuple = inntuple;
2054 : :
2267 andres@anarazel.de 2055 [ + - ]: 1020012 : if (ExecQualAndReset(hjclauses, econtext))
2056 : : {
6979 tgl@sss.pgh.pa.us 2057 : 1020012 : hjstate->hj_CurTuple = hashTuple;
4854 2058 : 1020012 : return true;
2059 : : }
2060 : : }
2061 : :
2307 andres@anarazel.de 2062 : 705370 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2063 : : }
2064 : :
2065 : : /*
2066 : : * no match
2067 : : */
4854 tgl@sss.pgh.pa.us 2068 : 1080015 : return false;
2069 : : }
2070 : :
2071 : : /*
2072 : : * ExecPrepHashTableForUnmatched
2073 : : * set up for a series of ExecScanHashTableForUnmatched calls
2074 : : */
2075 : : void
2076 : 2550 : ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
2077 : : {
2078 : : /*----------
2079 : : * During this scan we use the HashJoinState fields as follows:
2080 : : *
2081 : : * hj_CurBucketNo: next regular bucket to scan
2082 : : * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
2083 : : * hj_CurTuple: last tuple returned, or NULL to start next bucket
2084 : : *----------
2085 : : */
2086 : 2550 : hjstate->hj_CurBucketNo = 0;
2087 : 2550 : hjstate->hj_CurSkewBucketNo = 0;
2088 : 2550 : hjstate->hj_CurTuple = NULL;
2089 : 2550 : }
2090 : :
2091 : : /*
2092 : : * Decide if this process is allowed to run the unmatched scan. If so, the
2093 : : * batch barrier is advanced to PHJ_BATCH_SCAN and true is returned.
2094 : : * Otherwise the batch is detached and false is returned.
2095 : : */
2096 : : bool
380 tmunro@postgresql.or 2097 : 54 : ExecParallelPrepHashTableForUnmatched(HashJoinState *hjstate)
2098 : : {
2099 : 54 : HashJoinTable hashtable = hjstate->hj_HashTable;
2100 : 54 : int curbatch = hashtable->curbatch;
2101 : 54 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
2102 : :
2103 [ - + ]: 54 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE);
2104 : :
2105 : : /*
2106 : : * It would not be deadlock-free to wait on the batch barrier, because it
2107 : : * is in PHJ_BATCH_PROBE phase, and thus processes attached to it have
2108 : : * already emitted tuples. Therefore, we'll hold a wait-free election:
2109 : : * only one process can continue to the next phase, and all others detach
2110 : : * from this batch. They can still do any work on other batches, if there
2111 : : * are any.
2112 : : */
2113 [ + + ]: 54 : if (!BarrierArriveAndDetachExceptLast(&batch->batch_barrier))
2114 : : {
2115 : : /* This process considers the batch to be done. */
2116 : 21 : hashtable->batches[hashtable->curbatch].done = true;
2117 : :
2118 : : /* Make sure any temporary files are closed. */
2119 : 21 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
2120 : 21 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
2121 : :
2122 : : /*
2123 : : * Track largest batch we've seen, which would normally happen in
2124 : : * ExecHashTableDetachBatch().
2125 : : */
2126 : 21 : hashtable->spacePeak =
2127 : 21 : Max(hashtable->spacePeak,
2128 : : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
2129 : 21 : hashtable->curbatch = -1;
2130 : 21 : return false;
2131 : : }
2132 : :
2133 : : /* Now we are alone with this batch. */
2134 [ - + ]: 33 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
2135 : :
2136 : : /*
2137 : : * Has another process decided to give up early and command all processes
2138 : : * to skip the unmatched scan?
2139 : : */
2140 [ - + ]: 33 : if (batch->skip_unmatched)
2141 : : {
380 tmunro@postgresql.or 2142 :UBC 0 : hashtable->batches[hashtable->curbatch].done = true;
2143 : 0 : ExecHashTableDetachBatch(hashtable);
2144 : 0 : return false;
2145 : : }
2146 : :
2147 : : /* Now prepare the process local state, just as for non-parallel join. */
380 tmunro@postgresql.or 2148 :CBC 33 : ExecPrepHashTableForUnmatched(hjstate);
2149 : :
2150 : 33 : return true;
2151 : : }
2152 : :
2153 : : /*
2154 : : * ExecScanHashTableForUnmatched
2155 : : * scan the hash table for unmatched inner tuples
2156 : : *
2157 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2158 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2159 : : * for the latter.
2160 : : */
2161 : : bool
4854 tgl@sss.pgh.pa.us 2162 : 176113 : ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
2163 : : {
2164 : 176113 : HashJoinTable hashtable = hjstate->hj_HashTable;
2165 : 176113 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2166 : :
2167 : : for (;;)
2168 : : {
2169 : : /*
2170 : : * hj_CurTuple is the address of the tuple last returned from the
2171 : : * current bucket, or NULL if it's time to start scanning a new
2172 : : * bucket.
2173 : : */
2174 [ + + ]: 3270883 : if (hashTuple != NULL)
2307 andres@anarazel.de 2175 : 173596 : hashTuple = hashTuple->next.unshared;
4854 tgl@sss.pgh.pa.us 2176 [ + + ]: 3097287 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2177 : : {
2307 andres@anarazel.de 2178 : 3094776 : hashTuple = hashtable->buckets.unshared[hjstate->hj_CurBucketNo];
4854 tgl@sss.pgh.pa.us 2179 : 3094776 : hjstate->hj_CurBucketNo++;
2180 : : }
2181 [ - + ]: 2511 : else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
2182 : : {
4753 bruce@momjian.us 2183 :UBC 0 : int j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
2184 : :
4854 tgl@sss.pgh.pa.us 2185 : 0 : hashTuple = hashtable->skewBucket[j]->tuples;
2186 : 0 : hjstate->hj_CurSkewBucketNo++;
2187 : : }
2188 : : else
4854 tgl@sss.pgh.pa.us 2189 :CBC 2511 : break; /* finished all buckets */
2190 : :
2191 [ + + ]: 3452129 : while (hashTuple != NULL)
2192 : : {
2193 [ + + ]: 357359 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2194 : : {
2195 : : TupleTableSlot *inntuple;
2196 : :
2197 : : /* insert hashtable's tuple into exec slot */
2198 : 173602 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2199 : : hjstate->hj_HashTupleSlot,
2200 : : false); /* do not pfree */
2201 : 173602 : econtext->ecxt_innertuple = inntuple;
2202 : :
2203 : : /*
2204 : : * Reset temp memory each time; although this function doesn't
2205 : : * do any qual eval, the caller will, so let's keep it
2206 : : * parallel to ExecScanHashBucket.
2207 : : */
2208 : 173602 : ResetExprContext(econtext);
2209 : :
2210 : 173602 : hjstate->hj_CurTuple = hashTuple;
2211 : 173602 : return true;
2212 : : }
2213 : :
2307 andres@anarazel.de 2214 : 183757 : hashTuple = hashTuple->next.unshared;
2215 : : }
2216 : :
2217 : : /* allow this loop to be cancellable */
2455 2218 [ - + ]: 3094770 : CHECK_FOR_INTERRUPTS();
2219 : : }
2220 : :
2221 : : /*
2222 : : * no more unmatched tuples
2223 : : */
4854 tgl@sss.pgh.pa.us 2224 : 2511 : return false;
2225 : : }
2226 : :
2227 : : /*
2228 : : * ExecParallelScanHashTableForUnmatched
2229 : : * scan the hash table for unmatched inner tuples, in parallel join
2230 : : *
2231 : : * On success, the inner tuple is stored into hjstate->hj_CurTuple and
2232 : : * econtext->ecxt_innertuple, using hjstate->hj_HashTupleSlot as the slot
2233 : : * for the latter.
2234 : : */
2235 : : bool
380 tmunro@postgresql.or 2236 : 60036 : ExecParallelScanHashTableForUnmatched(HashJoinState *hjstate,
2237 : : ExprContext *econtext)
2238 : : {
2239 : 60036 : HashJoinTable hashtable = hjstate->hj_HashTable;
2240 : 60036 : HashJoinTuple hashTuple = hjstate->hj_CurTuple;
2241 : :
2242 : : for (;;)
2243 : : {
2244 : : /*
2245 : : * hj_CurTuple is the address of the tuple last returned from the
2246 : : * current bucket, or NULL if it's time to start scanning a new
2247 : : * bucket.
2248 : : */
2249 [ + + ]: 367236 : if (hashTuple != NULL)
2250 : 60003 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2251 [ + + ]: 307233 : else if (hjstate->hj_CurBucketNo < hashtable->nbuckets)
2252 : 307200 : hashTuple = ExecParallelHashFirstTuple(hashtable,
2253 : 307200 : hjstate->hj_CurBucketNo++);
2254 : : else
2255 : 33 : break; /* finished all buckets */
2256 : :
2257 [ + + ]: 487203 : while (hashTuple != NULL)
2258 : : {
2259 [ + + ]: 180003 : if (!HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(hashTuple)))
2260 : : {
2261 : : TupleTableSlot *inntuple;
2262 : :
2263 : : /* insert hashtable's tuple into exec slot */
2264 : 60003 : inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
2265 : : hjstate->hj_HashTupleSlot,
2266 : : false); /* do not pfree */
2267 : 60003 : econtext->ecxt_innertuple = inntuple;
2268 : :
2269 : : /*
2270 : : * Reset temp memory each time; although this function doesn't
2271 : : * do any qual eval, the caller will, so let's keep it
2272 : : * parallel to ExecScanHashBucket.
2273 : : */
2274 : 60003 : ResetExprContext(econtext);
2275 : :
2276 : 60003 : hjstate->hj_CurTuple = hashTuple;
2277 : 60003 : return true;
2278 : : }
2279 : :
2280 : 120000 : hashTuple = ExecParallelHashNextTuple(hashtable, hashTuple);
2281 : : }
2282 : :
2283 : : /* allow this loop to be cancellable */
2284 [ - + ]: 307200 : CHECK_FOR_INTERRUPTS();
2285 : : }
2286 : :
2287 : : /*
2288 : : * no more unmatched tuples
2289 : : */
2290 : 33 : return false;
2291 : : }
2292 : :
2293 : : /*
2294 : : * ExecHashTableReset
2295 : : *
2296 : : * reset hash table header for new batch
2297 : : */
2298 : : void
6979 tgl@sss.pgh.pa.us 2299 : 813 : ExecHashTableReset(HashJoinTable hashtable)
2300 : : {
2301 : : MemoryContext oldcxt;
9098 2302 : 813 : int nbuckets = hashtable->nbuckets;
2303 : :
2304 : : /*
2305 : : * Release all the hash buckets and tuples acquired in the prior pass, and
2306 : : * reinitialize the context for a new pass.
2307 : : */
8691 2308 : 813 : MemoryContextReset(hashtable->batchCxt);
9098 2309 : 813 : oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
2310 : :
2311 : : /* Reallocate and reinitialize the hash bucket headers. */
580 peter@eisentraut.org 2312 : 813 : hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
2313 : :
6979 tgl@sss.pgh.pa.us 2314 : 813 : hashtable->spaceUsed = 0;
2315 : :
9098 2316 : 813 : MemoryContextSwitchTo(oldcxt);
2317 : :
2318 : : /* Forget the chunks (the memory was freed by the context reset above). */
3504 heikki.linnakangas@i 2319 : 813 : hashtable->chunks = NULL;
10141 scrappy@hub.org 2320 : 813 : }
2321 : :
2322 : : /*
2323 : : * ExecHashTableResetMatchFlags
2324 : : * Clear all the HeapTupleHeaderHasMatch flags in the table
2325 : : */
2326 : : void
4854 tgl@sss.pgh.pa.us 2327 : 6 : ExecHashTableResetMatchFlags(HashJoinTable hashtable)
2328 : : {
2329 : : HashJoinTuple tuple;
2330 : : int i;
2331 : :
2332 : : /* Reset all flags in the main table ... */
2333 [ + + ]: 6150 : for (i = 0; i < hashtable->nbuckets; i++)
2334 : : {
2307 andres@anarazel.de 2335 [ + + ]: 6177 : for (tuple = hashtable->buckets.unshared[i]; tuple != NULL;
2336 : 33 : tuple = tuple->next.unshared)
4854 tgl@sss.pgh.pa.us 2337 : 33 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2338 : : }
2339 : :
2340 : : /* ... and the same for the skew buckets, if any */
2341 [ - + ]: 6 : for (i = 0; i < hashtable->nSkewBuckets; i++)
2342 : : {
4753 bruce@momjian.us 2343 :UBC 0 : int j = hashtable->skewBucketNums[i];
4854 tgl@sss.pgh.pa.us 2344 : 0 : HashSkewBucket *skewBucket = hashtable->skewBucket[j];
2345 : :
2307 andres@anarazel.de 2346 [ # # ]: 0 : for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next.unshared)
4854 tgl@sss.pgh.pa.us 2347 : 0 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(tuple));
2348 : : }
4854 tgl@sss.pgh.pa.us 2349 :CBC 6 : }
2350 : :
2351 : :
2352 : : void
5025 2353 : 613 : ExecReScanHash(HashState *node)
2354 : : {
647 2355 : 613 : PlanState *outerPlan = outerPlanState(node);
2356 : :
2357 : : /*
2358 : : * if chgParam of subnode is not null then plan will be re-scanned by
2359 : : * first ExecProcNode.
2360 : : */
2361 [ + + ]: 613 : if (outerPlan->chgParam == NULL)
2362 : 15 : ExecReScan(outerPlan);
9557 vadim4o@yahoo.com 2363 : 613 : }
2364 : :
2365 : :
2366 : : /*
2367 : : * ExecHashBuildSkewHash
2368 : : *
2369 : : * Set up for skew optimization if we can identify the most common values
2370 : : * (MCVs) of the outer relation's join key. We make a skew hash bucket
2371 : : * for the hash value of each MCV, up to the number of slots allowed
2372 : : * based on available memory.
2373 : : */
2374 : : static void
5503 tgl@sss.pgh.pa.us 2375 : 63 : ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
2376 : : {
2377 : : HeapTupleData *statsTuple;
2378 : : AttStatsSlot sslot;
2379 : :
2380 : : /* Do nothing if planner didn't identify the outer relation's join key */
2381 [ - + ]: 63 : if (!OidIsValid(node->skewTable))
5503 tgl@sss.pgh.pa.us 2382 :UBC 0 : return;
2383 : : /* Also, do nothing if we don't have room for at least one skew bucket */
5503 tgl@sss.pgh.pa.us 2384 [ - + ]:CBC 63 : if (mcvsToUse <= 0)
5503 tgl@sss.pgh.pa.us 2385 :UBC 0 : return;
2386 : :
2387 : : /*
2388 : : * Try to find the MCV statistics for the outer relation's join key.
2389 : : */
5173 rhaas@postgresql.org 2390 :CBC 63 : statsTuple = SearchSysCache3(STATRELATTINH,
2391 : : ObjectIdGetDatum(node->skewTable),
2392 : 63 : Int16GetDatum(node->skewColumn),
2393 : 63 : BoolGetDatum(node->skewInherit));
5503 tgl@sss.pgh.pa.us 2394 [ + + ]: 63 : if (!HeapTupleIsValid(statsTuple))
2395 : 36 : return;
2396 : :
2528 2397 [ + + ]: 27 : if (get_attstatsslot(&sslot, statsTuple,
2398 : : STATISTIC_KIND_MCV, InvalidOid,
2399 : : ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS))
2400 : : {
2401 : : double frac;
2402 : : int nbuckets;
2403 : : FmgrInfo *hashfunctions;
2404 : : int i;
2405 : :
2406 [ - + ]: 3 : if (mcvsToUse > sslot.nvalues)
2528 tgl@sss.pgh.pa.us 2407 :UBC 0 : mcvsToUse = sslot.nvalues;
2408 : :
2409 : : /*
2410 : : * Calculate the expected fraction of outer relation that will
2411 : : * participate in the skew optimization. If this isn't at least
2412 : : * SKEW_MIN_OUTER_FRACTION, don't use skew optimization.
2413 : : */
5503 tgl@sss.pgh.pa.us 2414 :CBC 3 : frac = 0;
2415 [ + + ]: 66 : for (i = 0; i < mcvsToUse; i++)
2528 2416 : 63 : frac += sslot.numbers[i];
5503 2417 [ - + ]: 3 : if (frac < SKEW_MIN_OUTER_FRACTION)
2418 : : {
2528 tgl@sss.pgh.pa.us 2419 :UBC 0 : free_attstatsslot(&sslot);
5503 2420 : 0 : ReleaseSysCache(statsTuple);
2421 : 0 : return;
2422 : : }
2423 : :
2424 : : /*
2425 : : * Okay, set up the skew hashtable.
2426 : : *
2427 : : * skewBucket[] is an open addressing hashtable with a power of 2 size
2428 : : * that is greater than the number of MCV values. (This ensures there
2429 : : * will be at least one null entry, so searches will always
2430 : : * terminate.)
2431 : : *
2432 : : * Note: this code could fail if mcvsToUse exceeds INT_MAX/8 or
2433 : : * MaxAllocSize/sizeof(void *)/8, but that is not currently possible
2434 : : * since we limit pg_statistic entries to much less than that.
2435 : : */
1467 drowley@postgresql.o 2436 :CBC 3 : nbuckets = pg_nextpower2_32(mcvsToUse + 1);
2437 : : /* use two more bits just to help avoid collisions */
5503 tgl@sss.pgh.pa.us 2438 : 3 : nbuckets <<= 2;
2439 : :
2440 : 3 : hashtable->skewEnabled = true;
2441 : 3 : hashtable->skewBucketLen = nbuckets;
2442 : :
2443 : : /*
2444 : : * We allocate the bucket memory in the hashtable's batch context. It
2445 : : * is only needed during the first batch, and this ensures it will be
2446 : : * automatically removed once the first batch is done.
2447 : : */
2448 : 3 : hashtable->skewBucket = (HashSkewBucket **)
2449 : 3 : MemoryContextAllocZero(hashtable->batchCxt,
2450 : : nbuckets * sizeof(HashSkewBucket *));
2451 : 3 : hashtable->skewBucketNums = (int *)
2452 : 3 : MemoryContextAllocZero(hashtable->batchCxt,
2453 : : mcvsToUse * sizeof(int));
2454 : :
2455 : 3 : hashtable->spaceUsed += nbuckets * sizeof(HashSkewBucket *)
2456 : 3 : + mcvsToUse * sizeof(int);
2457 : 3 : hashtable->spaceUsedSkew += nbuckets * sizeof(HashSkewBucket *)
2458 : 3 : + mcvsToUse * sizeof(int);
5186 rhaas@postgresql.org 2459 [ + - ]: 3 : if (hashtable->spaceUsed > hashtable->spacePeak)
2460 : 3 : hashtable->spacePeak = hashtable->spaceUsed;
2461 : :
2462 : : /*
2463 : : * Create a skew bucket for each MCV hash value.
2464 : : *
2465 : : * Note: it is very important that we create the buckets in order of
2466 : : * decreasing MCV frequency. If we have to remove some buckets, they
2467 : : * must be removed in reverse order of creation (see notes in
2468 : : * ExecHashRemoveNextSkewBucket) and we want the least common MCVs to
2469 : : * be removed first.
2470 : : */
5503 tgl@sss.pgh.pa.us 2471 : 3 : hashfunctions = hashtable->outer_hashfunctions;
2472 : :
2473 [ + + ]: 66 : for (i = 0; i < mcvsToUse; i++)
2474 : : {
2475 : : uint32 hashvalue;
2476 : : int bucket;
2477 : :
1850 peter@eisentraut.org 2478 : 63 : hashvalue = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[0],
2479 : 63 : hashtable->collations[0],
2480 : 63 : sslot.values[i]));
2481 : :
2482 : : /*
2483 : : * While we have not hit a hole in the hashtable and have not hit
2484 : : * the desired bucket, we have collided with some previous hash
2485 : : * value, so try the next bucket location. NB: this code must
2486 : : * match ExecHashGetSkewBucket.
2487 : : */
5503 tgl@sss.pgh.pa.us 2488 : 63 : bucket = hashvalue & (nbuckets - 1);
2489 [ - + ]: 63 : while (hashtable->skewBucket[bucket] != NULL &&
5503 tgl@sss.pgh.pa.us 2490 [ # # ]:UBC 0 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2491 : 0 : bucket = (bucket + 1) & (nbuckets - 1);
2492 : :
2493 : : /*
2494 : : * If we found an existing bucket with the same hashvalue, leave
2495 : : * it alone. It's okay for two MCVs to share a hashvalue.
2496 : : */
5503 tgl@sss.pgh.pa.us 2497 [ - + ]:CBC 63 : if (hashtable->skewBucket[bucket] != NULL)
5503 tgl@sss.pgh.pa.us 2498 :UBC 0 : continue;
2499 : :
2500 : : /* Okay, create a new skew bucket for this hashvalue. */
5503 tgl@sss.pgh.pa.us 2501 :CBC 126 : hashtable->skewBucket[bucket] = (HashSkewBucket *)
2502 : 63 : MemoryContextAlloc(hashtable->batchCxt,
2503 : : sizeof(HashSkewBucket));
2504 : 63 : hashtable->skewBucket[bucket]->hashvalue = hashvalue;
2505 : 63 : hashtable->skewBucket[bucket]->tuples = NULL;
2506 : 63 : hashtable->skewBucketNums[hashtable->nSkewBuckets] = bucket;
2507 : 63 : hashtable->nSkewBuckets++;
2508 : 63 : hashtable->spaceUsed += SKEW_BUCKET_OVERHEAD;
2509 : 63 : hashtable->spaceUsedSkew += SKEW_BUCKET_OVERHEAD;
5186 rhaas@postgresql.org 2510 [ + - ]: 63 : if (hashtable->spaceUsed > hashtable->spacePeak)
2511 : 63 : hashtable->spacePeak = hashtable->spaceUsed;
2512 : : }
2513 : :
2528 tgl@sss.pgh.pa.us 2514 : 3 : free_attstatsslot(&sslot);
2515 : : }
2516 : :
5503 2517 : 27 : ReleaseSysCache(statsTuple);
2518 : : }
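/*
 * Added illustrative note (not from the original comments): with
 * mcvsToUse = 100, pg_nextpower2_32(101) is 128 and the extra "<<= 2"
 * makes nbuckets = 512, so the open-addressing table stays at most about
 * 20% full.  Keeping the power-of-2 size strictly larger than the number
 * of MCVs guarantees at least one NULL slot, which is what lets the probe
 * loops above and in ExecHashGetSkewBucket terminate.
 */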
2519 : :
2520 : : /*
2521 : : * ExecHashGetSkewBucket
2522 : : *
2523 : : * Returns the index of the skew bucket for this hashvalue,
2524 : : * or INVALID_SKEW_BUCKET_NO if the hashvalue is not
2525 : : * associated with any active skew bucket.
2526 : : */
2527 : : int
2528 : 12806560 : ExecHashGetSkewBucket(HashJoinTable hashtable, uint32 hashvalue)
2529 : : {
2530 : : int bucket;
2531 : :
2532 : : /*
2533 : : * Always return INVALID_SKEW_BUCKET_NO if not doing skew optimization (in
2534 : : * particular, this happens after the initial batch is done).
2535 : : */
2536 [ + + ]: 12806560 : if (!hashtable->skewEnabled)
2537 : 12746560 : return INVALID_SKEW_BUCKET_NO;
2538 : :
2539 : : /*
2540 : : * Since skewBucketLen is a power of 2, we can do a modulo by ANDing.
2541 : : */
2542 : 60000 : bucket = hashvalue & (hashtable->skewBucketLen - 1);
2543 : :
2544 : : /*
2545 : : * While we have not hit a hole in the hashtable and have not hit the
2546 : : * desired bucket, we have collided with some other hash value, so try the
2547 : : * next bucket location.
2548 : : */
2549 [ + + ]: 63915 : while (hashtable->skewBucket[bucket] != NULL &&
2550 [ + + ]: 5409 : hashtable->skewBucket[bucket]->hashvalue != hashvalue)
2551 : 3915 : bucket = (bucket + 1) & (hashtable->skewBucketLen - 1);
2552 : :
2553 : : /*
2554 : : * Found the desired bucket?
2555 : : */
2556 [ + + ]: 60000 : if (hashtable->skewBucket[bucket] != NULL)
2557 : 1494 : return bucket;
2558 : :
2559 : : /*
2560 : : * There must not be any hashtable entry for this hash value.
2561 : : */
2562 : 58506 : return INVALID_SKEW_BUCKET_NO;
2563 : : }
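/*
 * Added illustrative note (not from the original comments): because
 * skewBucketLen is a power of 2, "hashvalue % skewBucketLen" is computed
 * as "hashvalue & (skewBucketLen - 1)".  For example, with
 * skewBucketLen = 8 a hash value of 27 starts at slot 27 & 7 = 3; if slot
 * 3 holds a different hash value the probe moves to slots 4, 5, ...
 * (wrapping with the same mask) until it either finds the matching hash
 * value or hits a NULL slot and returns INVALID_SKEW_BUCKET_NO.  The table
 * is sized with spare slots (see ExecHashBuildSkewHash), so the loop
 * always terminates.
 */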
2564 : :
2565 : : /*
2566 : : * ExecHashSkewTableInsert
2567 : : *
2568 : : * Insert a tuple into the skew hashtable.
2569 : : *
2570 : : * This should generally match up with the current-batch case in
2571 : : * ExecHashTableInsert.
2572 : : */
2573 : : static void
2574 : 294 : ExecHashSkewTableInsert(HashJoinTable hashtable,
2575 : : TupleTableSlot *slot,
2576 : : uint32 hashvalue,
2577 : : int bucketNumber)
2578 : : {
2579 : : bool shouldFree;
1977 andres@anarazel.de 2580 : 294 : MinimalTuple tuple = ExecFetchSlotMinimalTuple(slot, &shouldFree);
2581 : : HashJoinTuple hashTuple;
2582 : : int hashTupleSize;
2583 : :
2584 : : /* Create the HashJoinTuple */
5503 tgl@sss.pgh.pa.us 2585 : 294 : hashTupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2586 : 294 : hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
2587 : : hashTupleSize);
2588 : 294 : hashTuple->hashvalue = hashvalue;
2589 : 294 : memcpy(HJTUPLE_MINTUPLE(hashTuple), tuple, tuple->t_len);
4854 2590 : 294 : HeapTupleHeaderClearMatch(HJTUPLE_MINTUPLE(hashTuple));
2591 : :
2592 : : /* Push it onto the front of the skew bucket's list */
2307 andres@anarazel.de 2593 : 294 : hashTuple->next.unshared = hashtable->skewBucket[bucketNumber]->tuples;
5503 tgl@sss.pgh.pa.us 2594 : 294 : hashtable->skewBucket[bucketNumber]->tuples = hashTuple;
2307 andres@anarazel.de 2595 [ - + ]: 294 : Assert(hashTuple != hashTuple->next.unshared);
2596 : :
2597 : : /* Account for space used, and back off if we've used too much */
5503 tgl@sss.pgh.pa.us 2598 : 294 : hashtable->spaceUsed += hashTupleSize;
2599 : 294 : hashtable->spaceUsedSkew += hashTupleSize;
5186 rhaas@postgresql.org 2600 [ + + ]: 294 : if (hashtable->spaceUsed > hashtable->spacePeak)
2601 : 216 : hashtable->spacePeak = hashtable->spaceUsed;
5503 tgl@sss.pgh.pa.us 2602 [ + + ]: 345 : while (hashtable->spaceUsedSkew > hashtable->spaceAllowedSkew)
2603 : 51 : ExecHashRemoveNextSkewBucket(hashtable);
2604 : :
2605 : : /* Check we are not over the total spaceAllowed, either */
2606 [ - + ]: 294 : if (hashtable->spaceUsed > hashtable->spaceAllowed)
5503 tgl@sss.pgh.pa.us 2607 :UBC 0 : ExecHashIncreaseNumBatches(hashtable);
2608 : :
1977 andres@anarazel.de 2609 [ + - ]:CBC 294 : if (shouldFree)
2610 : 294 : heap_free_minimal_tuple(tuple);
5503 tgl@sss.pgh.pa.us 2611 : 294 : }
2612 : :
2613 : : /*
2614 : : * ExecHashRemoveNextSkewBucket
2615 : : *
2616 : : * Remove the least valuable skew bucket by pushing its tuples into
2617 : : * the main hash table.
2618 : : */
2619 : : static void
2620 : 51 : ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
2621 : : {
2622 : : int bucketToRemove;
2623 : : HashSkewBucket *bucket;
2624 : : uint32 hashvalue;
2625 : : int bucketno;
2626 : : int batchno;
2627 : : HashJoinTuple hashTuple;
2628 : :
2629 : : /* Locate the bucket to remove */
2630 : 51 : bucketToRemove = hashtable->skewBucketNums[hashtable->nSkewBuckets - 1];
2631 : 51 : bucket = hashtable->skewBucket[bucketToRemove];
2632 : :
2633 : : /*
2634 : : * Calculate which bucket and batch the tuples belong to in the main
2635 : : * hashtable. They all have the same hash value, so it's the same for all
2636 : : * of them. Also note that it's not possible for nbatch to increase while
2637 : : * we are processing the tuples.
2638 : : */
2639 : 51 : hashvalue = bucket->hashvalue;
2640 : 51 : ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);
2641 : :
2642 : : /* Process all tuples in the bucket */
2643 : 51 : hashTuple = bucket->tuples;
2644 [ + + ]: 225 : while (hashTuple != NULL)
2645 : : {
2307 andres@anarazel.de 2646 : 174 : HashJoinTuple nextHashTuple = hashTuple->next.unshared;
2647 : : MinimalTuple tuple;
2648 : : Size tupleSize;
2649 : :
2650 : : /*
2651 : : * This code must agree with ExecHashTableInsert. We do not use
2652 : : * ExecHashTableInsert directly as ExecHashTableInsert expects a
2653 : : * TupleTableSlot while we already have HashJoinTuples.
2654 : : */
5503 tgl@sss.pgh.pa.us 2655 : 174 : tuple = HJTUPLE_MINTUPLE(hashTuple);
2656 : 174 : tupleSize = HJTUPLE_OVERHEAD + tuple->t_len;
2657 : :
2658 : : /* Decide whether to put the tuple in the hash table or a temp file */
2659 [ + + ]: 174 : if (batchno == hashtable->curbatch)
2660 : : {
2661 : : /* Move the tuple to the main hash table */
2662 : : HashJoinTuple copyTuple;
2663 : :
2664 : : /*
2665 : : * We must copy the tuple into the dense storage, else it will not
2666 : : * be found by, eg, ExecHashIncreaseNumBatches.
2667 : : */
2989 2668 : 69 : copyTuple = (HashJoinTuple) dense_alloc(hashtable, tupleSize);
2669 : 69 : memcpy(copyTuple, hashTuple, tupleSize);
2670 : 69 : pfree(hashTuple);
2671 : :
2307 andres@anarazel.de 2672 : 69 : copyTuple->next.unshared = hashtable->buckets.unshared[bucketno];
2673 : 69 : hashtable->buckets.unshared[bucketno] = copyTuple;
2674 : :
2675 : : /* We have reduced skew space, but overall space doesn't change */
5503 tgl@sss.pgh.pa.us 2676 : 69 : hashtable->spaceUsedSkew -= tupleSize;
2677 : : }
2678 : : else
2679 : : {
2680 : : /* Put the tuple into a temp file for later batches */
2681 [ - + ]: 105 : Assert(batchno > hashtable->curbatch);
2682 : 105 : ExecHashJoinSaveTuple(tuple, hashvalue,
331 tomas.vondra@postgre 2683 : 105 : &hashtable->innerBatchFile[batchno],
2684 : : hashtable);
5503 tgl@sss.pgh.pa.us 2685 : 105 : pfree(hashTuple);
2686 : 105 : hashtable->spaceUsed -= tupleSize;
2687 : 105 : hashtable->spaceUsedSkew -= tupleSize;
2688 : : }
2689 : :
2690 : 174 : hashTuple = nextHashTuple;
2691 : :
2692 : : /* allow this loop to be cancellable */
2615 2693 [ - + ]: 174 : CHECK_FOR_INTERRUPTS();
2694 : : }
2695 : :
2696 : : /*
2697 : : * Free the bucket struct itself and reset the hashtable entry to NULL.
2698 : : *
2699 : : * NOTE: this is not nearly as simple as it looks on the surface, because
2700 : : * of the possibility of collisions in the hashtable. Suppose that hash
2701 : : * values A and B collide at a particular hashtable entry, and that A was
2702 : : * entered first so B gets shifted to a different table entry. If we were
2703 : : * to remove A first then ExecHashGetSkewBucket would mistakenly start
2704 : : * reporting that B is not in the hashtable, because it would hit the NULL
2705 : : * before finding B. However, we always remove entries in the reverse
2706 : : * order of creation, so this failure cannot happen.
2707 : : */
5503 2708 : 51 : hashtable->skewBucket[bucketToRemove] = NULL;
2709 : 51 : hashtable->nSkewBuckets--;
2710 : 51 : pfree(bucket);
2711 : 51 : hashtable->spaceUsed -= SKEW_BUCKET_OVERHEAD;
2712 : 51 : hashtable->spaceUsedSkew -= SKEW_BUCKET_OVERHEAD;
2713 : :
2714 : : /*
2715 : : * If we have removed all skew buckets then give up on skew optimization.
2716 : : * Release the arrays since they aren't useful any more.
2717 : : */
2718 [ - + ]: 51 : if (hashtable->nSkewBuckets == 0)
2719 : : {
5503 tgl@sss.pgh.pa.us 2720 :UBC 0 : hashtable->skewEnabled = false;
2721 : 0 : pfree(hashtable->skewBucket);
2722 : 0 : pfree(hashtable->skewBucketNums);
2723 : 0 : hashtable->skewBucket = NULL;
2724 : 0 : hashtable->skewBucketNums = NULL;
2725 : 0 : hashtable->spaceUsed -= hashtable->spaceUsedSkew;
2726 : 0 : hashtable->spaceUsedSkew = 0;
2727 : : }
5503 tgl@sss.pgh.pa.us 2728 :CBC 51 : }
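
/*
 * Illustrative sketch (not part of nodeHash.c): a toy linear-probing table
 * showing why skew buckets must be removed in reverse order of creation, as
 * the NOTE above describes.  All names here (toy_insert, toy_lookup,
 * TABLE_SIZE) are invented for this example; the real skew table stores
 * HashSkewBucket pointers indexed by hash value.
 */
#include <stdio.h>

#define TABLE_SIZE 8

static const char *table[TABLE_SIZE];

/* Insert key starting at slot h, probing forward past occupied slots. */
static int
toy_insert(int h, const char *key)
{
	while (table[h] != NULL)
		h = (h + 1) % TABLE_SIZE;
	table[h] = key;
	return h;
}

/* Probe from slot h until we find key or hit a NULL ("not present"). */
static int
toy_lookup(int h, const char *key)
{
	while (table[h] != NULL)
	{
		if (table[h] == key)
			return h;
		h = (h + 1) % TABLE_SIZE;
	}
	return -1;
}

int
main(void)
{
	const char *A = "A";
	const char *B = "B";
	int			slotA = toy_insert(3, A);	/* A claims slot 3 */
	int			slotB = toy_insert(3, B);	/* B collides, lands in slot 4 */

	/* Removing B first (reverse creation order) keeps A findable. */
	table[slotB] = NULL;
	printf("lookup(A) after removing B first: slot %d\n", toy_lookup(3, A));

	/*
	 * Had we removed A first instead, the probe for B would stop at the NULL
	 * left in slot 3 and wrongly report B as absent.
	 */
	table[slotA] = NULL;
	table[slotB] = B;
	printf("lookup(B) after removing A first: slot %d\n", toy_lookup(3, B));
	return 0;
}
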
2729 : :
2730 : : /*
2731 : : * Reserve space in the DSM segment for instrumentation data.
2732 : : */
2733 : : void
2322 andres@anarazel.de 2734 : 96 : ExecHashEstimate(HashState *node, ParallelContext *pcxt)
2735 : : {
2736 : : size_t size;
2737 : :
2738 : : /* don't need this if not instrumenting or no workers */
2261 tgl@sss.pgh.pa.us 2739 [ + + - + ]: 96 : if (!node->ps.instrument || pcxt->nworkers == 0)
2740 : 54 : return;
2741 : :
2322 andres@anarazel.de 2742 : 42 : size = mul_size(pcxt->nworkers, sizeof(HashInstrumentation));
2743 : 42 : size = add_size(size, offsetof(SharedHashInfo, hinstrument));
2744 : 42 : shm_toc_estimate_chunk(&pcxt->estimator, size);
2745 : 42 : shm_toc_estimate_keys(&pcxt->estimator, 1);
2746 : : }
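
/*
 * Illustrative sketch (not part of nodeHash.c): the reservation above is the
 * header of SharedHashInfo plus one HashInstrumentation slot per worker,
 * computed as offsetof(header, flexible array) + nworkers * sizeof(element).
 * The struct fields below are simplified stand-ins for the real definitions
 * in nodeHash.h.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct ToyHashInstrumentation
{
	int			nbuckets;
	int			nbatch;
	size_t		space_peak;
} ToyHashInstrumentation;

typedef struct ToySharedHashInfo
{
	int			num_workers;
	ToyHashInstrumentation hinstrument[];	/* one slot per worker */
} ToySharedHashInfo;

int
main(void)
{
	int			nworkers = 4;
	size_t		size = offsetof(ToySharedHashInfo, hinstrument) +
		nworkers * sizeof(ToyHashInstrumentation);

	printf("reserve %zu bytes of shared memory for %d workers\n",
		   size, nworkers);
	return 0;
}
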
2747 : :
2748 : : /*
2749 : : * Set up a space in the DSM for all workers to record instrumentation data
2750 : : * about their hash table.
2751 : : */
2752 : : void
2753 : 96 : ExecHashInitializeDSM(HashState *node, ParallelContext *pcxt)
2754 : : {
2755 : : size_t size;
2756 : :
2757 : : /* don't need this if not instrumenting or no workers */
2261 tgl@sss.pgh.pa.us 2758 [ + + - + ]: 96 : if (!node->ps.instrument || pcxt->nworkers == 0)
2759 : 54 : return;
2760 : :
2322 andres@anarazel.de 2761 : 42 : size = offsetof(SharedHashInfo, hinstrument) +
2762 : 42 : pcxt->nworkers * sizeof(HashInstrumentation);
2763 : 42 : node->shared_info = (SharedHashInfo *) shm_toc_allocate(pcxt->toc, size);
2764 : :
2765 : : /* Each per-worker area must start out as zeroes. */
2766 : 42 : memset(node->shared_info, 0, size);
2767 : :
2768 : 42 : node->shared_info->num_workers = pcxt->nworkers;
2769 : 42 : shm_toc_insert(pcxt->toc, node->ps.plan->plan_node_id,
2770 : 42 : node->shared_info);
2771 : : }
2772 : :
2773 : : /*
2774 : : * Locate the DSM space for hash table instrumentation data that we'll write
2775 : : * to at shutdown time.
2776 : : */
2777 : : void
2778 : 273 : ExecHashInitializeWorker(HashState *node, ParallelWorkerContext *pwcxt)
2779 : : {
2780 : : SharedHashInfo *shared_info;
2781 : :
2782 : : /* don't need this if not instrumenting */
2261 tgl@sss.pgh.pa.us 2783 [ + + ]: 273 : if (!node->ps.instrument)
2784 : 147 : return;
2785 : :
2786 : : /*
2787 : : * Find our entry in the shared area, and set up a pointer to it so that
2788 : : * we'll accumulate stats there when shutting down or rebuilding the hash
2789 : : * table.
2790 : : */
2791 : : shared_info = (SharedHashInfo *)
2792 : 126 : shm_toc_lookup(pwcxt->toc, node->ps.plan->plan_node_id, false);
2793 : 126 : node->hinstrument = &shared_info->hinstrument[ParallelWorkerNumber];
2794 : : }
2795 : :
2796 : : /*
2797 : : * Collect EXPLAIN stats if needed, saving them into DSM memory if
2798 : : * ExecHashInitializeWorker was called, or local storage if not. In the
2799 : : * parallel case, this must be done in ExecShutdownHash() rather than
2800 : : * ExecEndHash() because the latter runs after we've detached from the DSM
2801 : : * segment.
2802 : : */
2803 : : void
2322 andres@anarazel.de 2804 : 13175 : ExecShutdownHash(HashState *node)
2805 : : {
2806 : : /* Allocate save space if EXPLAIN'ing and we didn't do so already */
1464 tgl@sss.pgh.pa.us 2807 [ + + + + ]: 13175 : if (node->ps.instrument && !node->hinstrument)
580 peter@eisentraut.org 2808 : 54 : node->hinstrument = palloc0_object(HashInstrumentation);
2809 : : /* Now accumulate data for the current (final) hash table */
2322 andres@anarazel.de 2810 [ + + + + ]: 13175 : if (node->hinstrument && node->hashtable)
1464 tgl@sss.pgh.pa.us 2811 : 165 : ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
2322 andres@anarazel.de 2812 : 13175 : }
2813 : :
2814 : : /*
2815 : : * Retrieve instrumentation data from workers before the DSM segment is
2816 : : * detached, so that EXPLAIN can access it.
2817 : : */
2818 : : void
2819 : 42 : ExecHashRetrieveInstrumentation(HashState *node)
2820 : : {
2821 : 42 : SharedHashInfo *shared_info = node->shared_info;
2822 : : size_t size;
2823 : :
2261 tgl@sss.pgh.pa.us 2824 [ - + ]: 42 : if (shared_info == NULL)
2261 tgl@sss.pgh.pa.us 2825 :UBC 0 : return;
2826 : :
2827 : : /* Replace node->shared_info with a copy in backend-local memory. */
2322 andres@anarazel.de 2828 :CBC 42 : size = offsetof(SharedHashInfo, hinstrument) +
2829 : 42 : shared_info->num_workers * sizeof(HashInstrumentation);
2830 : 42 : node->shared_info = palloc(size);
2831 : 42 : memcpy(node->shared_info, shared_info, size);
2832 : : }
2833 : :
2834 : : /*
2835 : : * Accumulate instrumentation data from 'hashtable' into an
2836 : : * initially-zeroed HashInstrumentation struct.
2837 : : *
2838 : : * This is used to merge information across successive hash table instances
2839 : : * within a single plan node. We take the maximum values of each interesting
2840 : : * number. The largest nbuckets and largest nbatch values might have occurred
2841 : : * in different instances, so there's some risk of confusion from reporting
2842 : : * unrelated numbers; but there's a bigger risk of misdiagnosing a performance
2843 : : * issue if we don't report the largest values. Similarly, we want to report
2844 : : * the largest spacePeak regardless of whether it happened in the same
2845 : : * instance as the largest nbuckets or nbatch. All the instances should have
2846 : : * the same nbuckets_original and nbatch_original; but there's little value
2847 : : * in depending on that here, so handle them the same way.
2848 : : */
2849 : : void
1464 tgl@sss.pgh.pa.us 2850 : 165 : ExecHashAccumInstrumentation(HashInstrumentation *instrument,
2851 : : HashJoinTable hashtable)
2852 : : {
2853 : 165 : instrument->nbuckets = Max(instrument->nbuckets,
2854 : : hashtable->nbuckets);
2855 : 165 : instrument->nbuckets_original = Max(instrument->nbuckets_original,
2856 : : hashtable->nbuckets_original);
2857 : 165 : instrument->nbatch = Max(instrument->nbatch,
2858 : : hashtable->nbatch);
2859 : 165 : instrument->nbatch_original = Max(instrument->nbatch_original,
2860 : : hashtable->nbatch_original);
2861 : 165 : instrument->space_peak = Max(instrument->space_peak,
2862 : : hashtable->spacePeak);
2322 andres@anarazel.de 2863 : 165 : }
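
/*
 * Illustrative sketch (not part of nodeHash.c): merging stats across hash
 * table instances by taking the per-field maximum, as the comment above
 * describes.  ToyInstr and toy_accum are simplified stand-ins invented for
 * this example.
 */
#include <stdio.h>

#define Max(a,b) (((a) > (b)) ? (a) : (b))

typedef struct ToyInstr
{
	int			nbuckets;
	int			nbatch;
	long		space_peak;
} ToyInstr;

static void
toy_accum(ToyInstr *dst, const ToyInstr *src)
{
	dst->nbuckets = Max(dst->nbuckets, src->nbuckets);
	dst->nbatch = Max(dst->nbatch, src->nbatch);
	dst->space_peak = Max(dst->space_peak, src->space_peak);
}

int
main(void)
{
	ToyInstr	total = {0, 0, 0};
	ToyInstr	first = {2048, 1, 500000};	/* first hash table instance */
	ToyInstr	second = {1024, 4, 300000}; /* rebuilt with more batches */

	toy_accum(&total, &first);
	toy_accum(&total, &second);
	/* The largest nbuckets and nbatch may come from different instances. */
	printf("nbuckets=%d nbatch=%d space_peak=%ld\n",
		   total.nbuckets, total.nbatch, total.space_peak);
	return 0;
}
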
2864 : :
2865 : : /*
2866 : : * Allocate 'size' bytes from the currently active HashMemoryChunk
2867 : : */
2868 : : static void *
3504 heikki.linnakangas@i 2869 : 4656223 : dense_alloc(HashJoinTable hashtable, Size size)
2870 : : {
2871 : : HashMemoryChunk newChunk;
2872 : : char *ptr;
2873 : :
2874 : : /* just in case the size is not already aligned properly */
2875 : 4656223 : size = MAXALIGN(size);
2876 : :
2877 : : /*
2878 : : * If tuple size is larger than threshold, allocate a separate chunk.
2879 : : */
2880 [ - + ]: 4656223 : if (size > HASH_CHUNK_THRESHOLD)
2881 : : {
2882 : : /* allocate new chunk and put it at the beginning of the list */
3504 heikki.linnakangas@i 2883 :UBC 0 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2884 : : HASH_CHUNK_HEADER_SIZE + size);
2885 : 0 : newChunk->maxlen = size;
2294 tgl@sss.pgh.pa.us 2886 : 0 : newChunk->used = size;
2887 : 0 : newChunk->ntuples = 1;
2888 : :
2889 : : /*
2890 : : * Add this chunk to the list after the first existing chunk, so that
2891 : : * we don't lose the remaining space in the "current" chunk.
2892 : : */
3504 heikki.linnakangas@i 2893 [ # # ]: 0 : if (hashtable->chunks != NULL)
2894 : : {
2895 : 0 : newChunk->next = hashtable->chunks->next;
2307 andres@anarazel.de 2896 : 0 : hashtable->chunks->next.unshared = newChunk;
2897 : : }
2898 : : else
2899 : : {
2900 : 0 : newChunk->next.unshared = hashtable->chunks;
3504 heikki.linnakangas@i 2901 : 0 : hashtable->chunks = newChunk;
2902 : : }
2903 : :
2294 tgl@sss.pgh.pa.us 2904 : 0 : return HASH_CHUNK_DATA(newChunk);
2905 : : }
2906 : :
2907 : : /*
2908 : : * See if we have enough space for it in the current chunk (if any). If
2909 : : * not, allocate a fresh chunk.
2910 : : */
3504 heikki.linnakangas@i 2911 [ + + ]:CBC 4656223 : if ((hashtable->chunks == NULL) ||
2912 [ + + ]: 4645955 : (hashtable->chunks->maxlen - hashtable->chunks->used) < size)
2913 : : {
2914 : : /* allocate new chunk and put it at the beginning of the list */
2915 : 16005 : newChunk = (HashMemoryChunk) MemoryContextAlloc(hashtable->batchCxt,
2916 : : HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
2917 : :
2918 : 16005 : newChunk->maxlen = HASH_CHUNK_SIZE;
2919 : 16005 : newChunk->used = size;
2920 : 16005 : newChunk->ntuples = 1;
2921 : :
2307 andres@anarazel.de 2922 : 16005 : newChunk->next.unshared = hashtable->chunks;
3504 heikki.linnakangas@i 2923 : 16005 : hashtable->chunks = newChunk;
2924 : :
2294 tgl@sss.pgh.pa.us 2925 : 16005 : return HASH_CHUNK_DATA(newChunk);
2926 : : }
2927 : :
2928 : : /* There is enough space in the current chunk, let's add the tuple */
2929 : 4640218 : ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
3504 heikki.linnakangas@i 2930 : 4640218 : hashtable->chunks->used += size;
2931 : 4640218 : hashtable->chunks->ntuples += 1;
2932 : :
2933 : : /* return pointer to the start of the tuple memory */
2934 : 4640218 : return ptr;
2935 : : }
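
/*
 * Illustrative sketch (not part of nodeHash.c): the chunked bump-allocation
 * pattern used by dense_alloc() above.  Tuples are packed into large chunks;
 * a fresh chunk is created when the current one is full, and oversized
 * requests get a dedicated chunk kept off the "current" slot.  The constants,
 * struct layout and use of plain malloc() are simplifications; the real code
 * allocates from hashtable->batchCxt and never frees individual tuples.
 */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define TOY_CHUNK_SIZE		(32 * 1024)
#define TOY_CHUNK_THRESHOLD	(TOY_CHUNK_SIZE / 4)

typedef struct ToyChunk
{
	struct ToyChunk *next;
	size_t		maxlen;			/* usable bytes in data[] */
	size_t		used;			/* bytes handed out so far */
	char		data[];			/* tuples are packed densely here */
} ToyChunk;

static ToyChunk *chunks = NULL; /* head of the chunk list */

static void *
toy_dense_alloc(size_t size)
{
	ToyChunk   *chunk;

	/*
	 * Oversized allocations get their own chunk, linked in after the head so
	 * the remaining space in the current chunk is not lost.
	 */
	if (size > TOY_CHUNK_THRESHOLD)
	{
		chunk = malloc(offsetof(ToyChunk, data) + size);
		chunk->maxlen = chunk->used = size;
		if (chunks != NULL)
		{
			chunk->next = chunks->next;
			chunks->next = chunk;
		}
		else
		{
			chunk->next = NULL;
			chunks = chunk;
		}
		return chunk->data;
	}

	/* Start a fresh chunk if there is none, or too little room is left. */
	if (chunks == NULL || chunks->maxlen - chunks->used < size)
	{
		chunk = malloc(offsetof(ToyChunk, data) + TOY_CHUNK_SIZE);
		chunk->maxlen = TOY_CHUNK_SIZE;
		chunk->used = 0;
		chunk->next = chunks;
		chunks = chunk;
	}

	/* Bump-allocate from the current chunk. */
	chunks->used += size;
	return chunks->data + chunks->used - size;
}

int
main(void)
{
	char	   *tuple = toy_dense_alloc(64);

	memset(tuple, 0, 64);
	return 0;
}
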
2936 : :
2937 : : /*
2938 : : * Allocate space for a tuple in shared dense storage. This is equivalent to
2939 : : * dense_alloc but for Parallel Hash using shared memory.
2940 : : *
2941 : : * While loading a tuple into shared memory, we might run out of memory and
2942 : : * decide to repartition, or determine that the load factor is too high and
2943 : : * decide to expand the bucket array, or discover that another participant has
2944 : : * commanded us to help do that. Return NULL if the number of buckets or batches
2945 : : * has changed, indicating that the caller must retry (considering the
2946 : : * possibility that the tuple no longer belongs in the same batch).
2947 : : */
2948 : : static HashJoinTuple
2307 andres@anarazel.de 2949 : 1202298 : ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t size,
2950 : : dsa_pointer *shared)
2951 : : {
2952 : 1202298 : ParallelHashJoinState *pstate = hashtable->parallel_state;
2953 : : dsa_pointer chunk_shared;
2954 : : HashMemoryChunk chunk;
2955 : : Size chunk_size;
2956 : : HashJoinTuple result;
2957 : 1202298 : int curbatch = hashtable->curbatch;
2958 : :
2959 : 1202298 : size = MAXALIGN(size);
2960 : :
2961 : : /*
2962 : : * Fast path: if there is enough space in this backend's current chunk,
2963 : : * then we can allocate without any locking.
2964 : : */
2965 : 1202298 : chunk = hashtable->current_chunk;
2966 [ + + + - ]: 1202298 : if (chunk != NULL &&
2293 tgl@sss.pgh.pa.us 2967 : 1201649 : size <= HASH_CHUNK_THRESHOLD &&
2307 andres@anarazel.de 2968 [ + + ]: 1201649 : chunk->maxlen - chunk->used >= size)
2969 : : {
2970 : :
2971 : 1200387 : chunk_shared = hashtable->current_chunk_shared;
2972 [ - + ]: 1200387 : Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
2973 : 1200387 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE + chunk->used;
2294 tgl@sss.pgh.pa.us 2974 : 1200387 : result = (HashJoinTuple) (HASH_CHUNK_DATA(chunk) + chunk->used);
2307 andres@anarazel.de 2975 : 1200387 : chunk->used += size;
2976 : :
2977 [ - + ]: 1200387 : Assert(chunk->used <= chunk->maxlen);
2978 [ - + ]: 1200387 : Assert(result == dsa_get_address(hashtable->area, *shared));
2979 : :
2980 : 1200387 : return result;
2981 : : }
2982 : :
2983 : : /* Slow path: try to allocate a new chunk. */
2984 : 1911 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
2985 : :
2986 : : /*
2987 : : * Check if we need to help increase the number of buckets or batches.
2988 : : */
2989 [ + + ]: 1911 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
2990 [ + + ]: 1882 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
2991 : : {
2992 : 85 : ParallelHashGrowth growth = pstate->growth;
2993 : :
2994 : 85 : hashtable->current_chunk = NULL;
2995 : 85 : LWLockRelease(&pstate->lock);
2996 : :
2997 : : /* Another participant has commanded us to help grow. */
2998 [ + + ]: 85 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
2999 : 29 : ExecParallelHashIncreaseNumBatches(hashtable);
3000 [ + - ]: 56 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3001 : 56 : ExecParallelHashIncreaseNumBuckets(hashtable);
3002 : :
3003 : : /* The caller must retry. */
3004 : 85 : return NULL;
3005 : : }
3006 : :
3007 : : /* Oversized tuples get their own chunk. */
3008 [ + + ]: 1826 : if (size > HASH_CHUNK_THRESHOLD)
3009 : 24 : chunk_size = size + HASH_CHUNK_HEADER_SIZE;
3010 : : else
3011 : 1802 : chunk_size = HASH_CHUNK_SIZE;
3012 : :
3013 : : /* Check if it's time to grow batches or buckets. */
3014 [ + + ]: 1826 : if (pstate->growth != PHJ_GROWTH_DISABLED)
3015 : : {
3016 [ - + ]: 926 : Assert(curbatch == 0);
388 tmunro@postgresql.or 3017 [ - + ]: 926 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_HASH_INNER);
3018 : :
3019 : : /*
3020 : : * Check if our space limit would be exceeded. To avoid choking on
3021 : : * very large tuples or very low hash_mem setting, we'll always allow
3022 : : * each backend to allocate at least one chunk.
3023 : : */
2307 andres@anarazel.de 3024 [ + + ]: 926 : if (hashtable->batches[0].at_least_one_chunk &&
3025 : 672 : hashtable->batches[0].shared->size +
3026 [ + + ]: 672 : chunk_size > pstate->space_allowed)
3027 : : {
3028 : 21 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3029 : 21 : hashtable->batches[0].shared->space_exhausted = true;
3030 : 21 : LWLockRelease(&pstate->lock);
3031 : :
3032 : 21 : return NULL;
3033 : : }
3034 : :
3035 : : /* Check if our load factor limit would be exceeded. */
3036 [ + + ]: 905 : if (hashtable->nbatch == 1)
3037 : : {
3038 : 790 : hashtable->batches[0].shared->ntuples += hashtable->batches[0].ntuples;
3039 : 790 : hashtable->batches[0].ntuples = 0;
3040 : : /* Guard against integer overflow and alloc size overflow */
3041 : 790 : if (hashtable->batches[0].shared->ntuples + 1 >
3042 [ + + ]: 790 : hashtable->nbuckets * NTUP_PER_BUCKET &&
2135 tmunro@postgresql.or 3043 [ + - ]: 52 : hashtable->nbuckets < (INT_MAX / 2) &&
3044 [ + - ]: 52 : hashtable->nbuckets * 2 <=
3045 : : MaxAllocSize / sizeof(dsa_pointer_atomic))
3046 : : {
2307 andres@anarazel.de 3047 : 52 : pstate->growth = PHJ_GROWTH_NEED_MORE_BUCKETS;
3048 : 52 : LWLockRelease(&pstate->lock);
3049 : :
3050 : 52 : return NULL;
3051 : : }
3052 : : }
3053 : : }
3054 : :
3055 : : /* We are cleared to allocate a new chunk. */
3056 : 1753 : chunk_shared = dsa_allocate(hashtable->area, chunk_size);
3057 : 1753 : hashtable->batches[curbatch].shared->size += chunk_size;
3058 : 1753 : hashtable->batches[curbatch].at_least_one_chunk = true;
3059 : :
3060 : : /* Set up the chunk. */
3061 : 1753 : chunk = (HashMemoryChunk) dsa_get_address(hashtable->area, chunk_shared);
3062 : 1753 : *shared = chunk_shared + HASH_CHUNK_HEADER_SIZE;
3063 : 1753 : chunk->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
3064 : 1753 : chunk->used = size;
3065 : :
3066 : : /*
3067 : : * Push it onto the list of chunks, so that it can be found if we need to
3068 : : * increase the number of buckets or batches (batch 0 only) and later for
3069 : : * freeing the memory (all batches).
3070 : : */
3071 : 1753 : chunk->next.shared = hashtable->batches[curbatch].shared->chunks;
3072 : 1753 : hashtable->batches[curbatch].shared->chunks = chunk_shared;
3073 : :
3074 [ + + ]: 1753 : if (size <= HASH_CHUNK_THRESHOLD)
3075 : : {
3076 : : /*
3077 : : * Make this the current chunk so that we can use the fast path to
3078 : : * fill the rest of it up in future calls.
3079 : : */
3080 : 1735 : hashtable->current_chunk = chunk;
3081 : 1735 : hashtable->current_chunk_shared = chunk_shared;
3082 : : }
3083 : 1753 : LWLockRelease(&pstate->lock);
3084 : :
2294 tgl@sss.pgh.pa.us 3085 [ - + ]: 1753 : Assert(HASH_CHUNK_DATA(chunk) == dsa_get_address(hashtable->area, *shared));
3086 : 1753 : result = (HashJoinTuple) HASH_CHUNK_DATA(chunk);
3087 : :
2307 andres@anarazel.de 3088 : 1753 : return result;
3089 : : }
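
/*
 * Illustrative sketch (not part of nodeHash.c): the retry contract described
 * above.  A NULL result from the allocator means the number of buckets or
 * batches changed (possibly because this backend just helped grow them), so
 * the caller recomputes the tuple's bucket and batch and tries again.  The
 * toy_alloc and toy_recompute_bucket_and_batch functions are stand-ins
 * invented for this example; the real caller is ExecParallelHashTableInsert.
 */
#include <stddef.h>
#include <stdio.h>

static int	attempts = 0;

/* Pretend the first attempt triggers a repartition and returns NULL. */
static void *
toy_alloc(size_t size)
{
	static char buf[64];

	(void) size;				/* a real allocator would bump a chunk */
	if (attempts++ == 0)
		return NULL;			/* caller must retry */
	return buf;
}

static void
toy_recompute_bucket_and_batch(int *bucketno, int *batchno)
{
	/* With more batches, the same hash value can map to a later batch. */
	*bucketno = 5;
	*batchno = 2;
}

int
main(void)
{
	int			bucketno = 3;
	int			batchno = 0;
	void	   *tuple;

	while ((tuple = toy_alloc(48)) == NULL)
		toy_recompute_bucket_and_batch(&bucketno, &batchno);

	printf("tuple at %p stored in bucket %d of batch %d after %d attempts\n",
		   tuple, bucketno, batchno, attempts);
	return 0;
}
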
3090 : :
3091 : : /*
3092 : : * One backend needs to set up the shared batch state including tuplestores.
3093 : : * Other backends will ensure they have correctly configured accessors by
3094 : : * calling ExecParallelHashEnsureBatchAccessors().
3095 : : */
3096 : : static void
3097 : 120 : ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
3098 : : {
3099 : 120 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3100 : : ParallelHashJoinBatch *batches;
3101 : : MemoryContext oldcxt;
3102 : : int i;
3103 : :
3104 [ - + ]: 120 : Assert(hashtable->batches == NULL);
3105 : :
3106 : : /* Allocate space. */
3107 : 120 : pstate->batches =
3108 : 120 : dsa_allocate0(hashtable->area,
3109 : : EstimateParallelHashJoinBatch(hashtable) * nbatch);
3110 : 120 : pstate->nbatch = nbatch;
3111 : 120 : batches = dsa_get_address(hashtable->area, pstate->batches);
3112 : :
3113 : : /*
3114 : : * Use hash join spill memory context to allocate accessors, including
3115 : : * buffers for the temporary files.
3116 : : */
331 tomas.vondra@postgre 3117 : 120 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3118 : :
3119 : : /* Allocate this backend's accessor array. */
2307 andres@anarazel.de 3120 : 120 : hashtable->nbatch = nbatch;
580 peter@eisentraut.org 3121 : 120 : hashtable->batches =
3122 : 120 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3123 : :
3124 : : /* Set up the shared state, tuplestores and backend-local accessors. */
2307 andres@anarazel.de 3125 [ + + ]: 729 : for (i = 0; i < hashtable->nbatch; ++i)
3126 : : {
3127 : 609 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3128 : 609 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3129 : : char name[MAXPGPATH];
3130 : :
3131 : : /*
3132 : : * All members of shared were zero-initialized. We just need to set
3133 : : * up the Barrier.
3134 : : */
3135 : 609 : BarrierInit(&shared->batch_barrier, 0);
3136 [ + + ]: 609 : if (i == 0)
3137 : : {
3138 : : /* Batch 0 doesn't need to be loaded. */
3139 : 120 : BarrierAttach(&shared->batch_barrier);
388 tmunro@postgresql.or 3140 [ + + ]: 480 : while (BarrierPhase(&shared->batch_barrier) < PHJ_BATCH_PROBE)
2307 andres@anarazel.de 3141 : 360 : BarrierArriveAndWait(&shared->batch_barrier, 0);
3142 : 120 : BarrierDetach(&shared->batch_barrier);
3143 : : }
3144 : :
3145 : : /* Initialize accessor state. All members were zero-initialized. */
3146 : 609 : accessor->shared = shared;
3147 : :
3148 : : /* Initialize the shared tuplestores. */
3149 : 609 : snprintf(name, sizeof(name), "i%dof%d", i, hashtable->nbatch);
3150 : 609 : accessor->inner_tuples =
3151 : 609 : sts_initialize(ParallelHashJoinBatchInner(shared),
3152 : : pstate->nparticipants,
3153 : : ParallelWorkerNumber + 1,
3154 : : sizeof(uint32),
3155 : : SHARED_TUPLESTORE_SINGLE_PASS,
3156 : : &pstate->fileset,
3157 : : name);
3158 : 609 : snprintf(name, sizeof(name), "o%dof%d", i, hashtable->nbatch);
3159 : 609 : accessor->outer_tuples =
3160 : 609 : sts_initialize(ParallelHashJoinBatchOuter(shared,
3161 : : pstate->nparticipants),
3162 : : pstate->nparticipants,
3163 : : ParallelWorkerNumber + 1,
3164 : : sizeof(uint32),
3165 : : SHARED_TUPLESTORE_SINGLE_PASS,
3166 : : &pstate->fileset,
3167 : : name);
3168 : : }
3169 : :
3170 : 120 : MemoryContextSwitchTo(oldcxt);
3171 : 120 : }
3172 : :
3173 : : /*
3174 : : * Free the current set of ParallelHashJoinBatchAccessor objects.
3175 : : */
3176 : : static void
3177 : 59 : ExecParallelHashCloseBatchAccessors(HashJoinTable hashtable)
3178 : : {
3179 : : int i;
3180 : :
3181 [ + + ]: 322 : for (i = 0; i < hashtable->nbatch; ++i)
3182 : : {
3183 : : /* Make sure no files are left open. */
3184 : 263 : sts_end_write(hashtable->batches[i].inner_tuples);
3185 : 263 : sts_end_write(hashtable->batches[i].outer_tuples);
3186 : 263 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3187 : 263 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3188 : : }
3189 : 59 : pfree(hashtable->batches);
3190 : 59 : hashtable->batches = NULL;
3191 : 59 : }
3192 : :
3193 : : /*
3194 : : * Make sure this backend has up-to-date accessors for the current set of
3195 : : * batches.
3196 : : */
3197 : : static void
3198 : 519 : ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
3199 : : {
3200 : 519 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3201 : : ParallelHashJoinBatch *batches;
3202 : : MemoryContext oldcxt;
3203 : : int i;
3204 : :
3205 [ + + ]: 519 : if (hashtable->batches != NULL)
3206 : : {
3207 [ + + ]: 394 : if (hashtable->nbatch == pstate->nbatch)
3208 : 382 : return;
3209 : 12 : ExecParallelHashCloseBatchAccessors(hashtable);
3210 : : }
3211 : :
3212 : : /*
3213 : : * We should never see a state where the batch-tracking array is freed,
3214 : : * because we should have given up sooner if we join when the build
3215 : : * barrier has reached the PHJ_BUILD_FREE phase.
3216 : : */
390 tmunro@postgresql.or 3217 [ - + ]: 137 : Assert(DsaPointerIsValid(pstate->batches));
3218 : :
3219 : : /*
3220 : : * Use hash join spill memory context to allocate accessors, including
3221 : : * buffers for the temporary files.
3222 : : */
331 tomas.vondra@postgre 3223 : 137 : oldcxt = MemoryContextSwitchTo(hashtable->spillCxt);
3224 : :
3225 : : /* Allocate this backend's accessor array. */
2307 andres@anarazel.de 3226 : 137 : hashtable->nbatch = pstate->nbatch;
580 peter@eisentraut.org 3227 : 137 : hashtable->batches =
3228 : 137 : palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
3229 : :
3230 : : /* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
3231 : : batches = (ParallelHashJoinBatch *)
2307 andres@anarazel.de 3232 : 137 : dsa_get_address(hashtable->area, pstate->batches);
3233 : :
3234 : : /* Set up the accessor array and attach to the tuplestores. */
3235 [ + + ]: 868 : for (i = 0; i < hashtable->nbatch; ++i)
3236 : : {
3237 : 731 : ParallelHashJoinBatchAccessor *accessor = &hashtable->batches[i];
3238 : 731 : ParallelHashJoinBatch *shared = NthParallelHashJoinBatch(batches, i);
3239 : :
3240 : 731 : accessor->shared = shared;
3241 : 731 : accessor->preallocated = 0;
3242 : 731 : accessor->done = false;
380 tmunro@postgresql.or 3243 : 731 : accessor->outer_eof = false;
2307 andres@anarazel.de 3244 : 731 : accessor->inner_tuples =
3245 : 731 : sts_attach(ParallelHashJoinBatchInner(shared),
3246 : : ParallelWorkerNumber + 1,
3247 : : &pstate->fileset);
3248 : 731 : accessor->outer_tuples =
3249 : 731 : sts_attach(ParallelHashJoinBatchOuter(shared,
3250 : : pstate->nparticipants),
3251 : : ParallelWorkerNumber + 1,
3252 : : &pstate->fileset);
3253 : : }
3254 : :
3255 : 137 : MemoryContextSwitchTo(oldcxt);
3256 : : }
3257 : :
3258 : : /*
3259 : : * Allocate an empty shared memory hash table for a given batch.
3260 : : */
3261 : : void
3262 : 471 : ExecParallelHashTableAlloc(HashJoinTable hashtable, int batchno)
3263 : : {
3264 : 471 : ParallelHashJoinBatch *batch = hashtable->batches[batchno].shared;
3265 : : dsa_pointer_atomic *buckets;
3266 : 471 : int nbuckets = hashtable->parallel_state->nbuckets;
3267 : : int i;
3268 : :
3269 : 471 : batch->buckets =
3270 : 471 : dsa_allocate(hashtable->area, sizeof(dsa_pointer_atomic) * nbuckets);
3271 : : buckets = (dsa_pointer_atomic *)
3272 : 471 : dsa_get_address(hashtable->area, batch->buckets);
3273 [ + + ]: 1742295 : for (i = 0; i < nbuckets; ++i)
3274 : 1741824 : dsa_pointer_atomic_init(&buckets[i], InvalidDsaPointer);
3275 : 471 : }
3276 : :
3277 : : /*
3278 : : * If we are currently attached to a shared hash join batch, detach. If we
3279 : : * are last to detach, clean up.
3280 : : */
3281 : : void
3282 : 10378 : ExecHashTableDetachBatch(HashJoinTable hashtable)
3283 : : {
3284 [ + + ]: 10378 : if (hashtable->parallel_state != NULL &&
3285 [ + + ]: 750 : hashtable->curbatch >= 0)
3286 : : {
3287 : 552 : int curbatch = hashtable->curbatch;
3288 : 552 : ParallelHashJoinBatch *batch = hashtable->batches[curbatch].shared;
380 tmunro@postgresql.or 3289 : 552 : bool attached = true;
3290 : :
3291 : : /* Make sure any temporary files are closed. */
2307 andres@anarazel.de 3292 : 552 : sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
3293 : 552 : sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
3294 : :
3295 : : /* After attaching we always get at least to PHJ_BATCH_PROBE. */
380 tmunro@postgresql.or 3296 [ + + - + ]: 552 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
3297 : : BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
3298 : :
3299 : : /*
3300 : : * If we're abandoning the PHJ_BATCH_PROBE phase early without having
3301 : : * reached the end of it, it means the plan doesn't want any more
3302 : : * tuples, and it is happy to abandon any tuples buffered in this
3303 : : * process's subplans. For correctness, we can't allow any process to
3304 : : * execute the PHJ_BATCH_SCAN phase, because we will never have the
3305 : : * complete set of match bits. Therefore we skip emitting unmatched
3306 : : * tuples in all backends (if this is a full/right join), as if those
3307 : : * tuples were all due to be emitted by this process and it has
3308 : : * abandoned them too.
3309 : : */
3310 [ + + ]: 552 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
3311 [ - + ]: 519 : !hashtable->batches[curbatch].outer_eof)
3312 : : {
3313 : : /*
3314 : : * This flag may be written to by multiple backends during
3315 : : * PHJ_BATCH_PROBE phase, but will only be read in PHJ_BATCH_SCAN
3316 : : * phase so requires no extra locking.
3317 : : */
380 tmunro@postgresql.or 3318 :UBC 0 : batch->skip_unmatched = true;
3319 : : }
3320 : :
3321 : : /*
3322 : : * Even if we aren't doing a full/right outer join, we'll step through
3323 : : * the PHJ_BATCH_SCAN phase just to maintain the invariant that
3324 : : * freeing happens in PHJ_BATCH_FREE, but that'll be wait-free.
3325 : : */
380 tmunro@postgresql.or 3326 [ + + ]:CBC 552 : if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
3327 : 519 : attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
3328 [ + + + - ]: 552 : if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
3329 : : {
3330 : : /*
3331 : : * We are no longer attached to the batch barrier, but we're the
3332 : : * process that was chosen to free resources and it's safe to
3333 : : * assert the current phase. The ParallelHashJoinBatch can't go
3334 : : * away underneath us while we are attached to the build barrier,
3335 : : * making this access safe.
3336 : : */
388 3337 [ - + ]: 471 : Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_FREE);
3338 : :
3339 : : /* Free shared chunks and buckets. */
2307 andres@anarazel.de 3340 [ + + ]: 2057 : while (DsaPointerIsValid(batch->chunks))
3341 : : {
3342 : : HashMemoryChunk chunk =
331 tgl@sss.pgh.pa.us 3343 : 1586 : dsa_get_address(hashtable->area, batch->chunks);
2307 andres@anarazel.de 3344 : 1586 : dsa_pointer next = chunk->next.shared;
3345 : :
3346 : 1586 : dsa_free(hashtable->area, batch->chunks);
3347 : 1586 : batch->chunks = next;
3348 : : }
3349 [ + - ]: 471 : if (DsaPointerIsValid(batch->buckets))
3350 : : {
3351 : 471 : dsa_free(hashtable->area, batch->buckets);
3352 : 471 : batch->buckets = InvalidDsaPointer;
3353 : : }
3354 : : }
3355 : :
3356 : : /*
3357 : : * Track the largest batch we've been attached to. Though each
3358 : : * backend might see a different subset of batches, explain.c will
3359 : : * scan the results from all backends to find the largest value.
3360 : : */
2295 3361 : 552 : hashtable->spacePeak =
3362 : 552 : Max(hashtable->spacePeak,
3363 : : batch->size + sizeof(dsa_pointer_atomic) * hashtable->nbuckets);
3364 : :
3365 : : /* Remember that we are not attached to a batch. */
2307 3366 : 552 : hashtable->curbatch = -1;
3367 : : }
3368 : 10378 : }
3369 : :
3370 : : /*
3371 : : * Detach from all shared resources. If we are last to detach, clean up.
3372 : : */
3373 : : void
3374 : 9826 : ExecHashTableDetach(HashJoinTable hashtable)
3375 : : {
390 tmunro@postgresql.or 3376 : 9826 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3377 : :
3378 : : /*
3379 : : * If we're involved in a parallel query, we must either have gotten all
3380 : : * the way to PHJ_BUILD_RUN, or joined too late and be in PHJ_BUILD_FREE.
3381 : : */
3382 [ + + - + ]: 9826 : Assert(!pstate ||
3383 : : BarrierPhase(&pstate->build_barrier) >= PHJ_BUILD_RUN);
3384 : :
388 3385 [ + + + - ]: 9826 : if (pstate && BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_RUN)
3386 : : {
3387 : : int i;
3388 : :
3389 : : /* Make sure any temporary files are closed. */
2307 andres@anarazel.de 3390 [ + - ]: 198 : if (hashtable->batches)
3391 : : {
3392 [ + + ]: 1275 : for (i = 0; i < hashtable->nbatch; ++i)
3393 : : {
3394 : 1077 : sts_end_write(hashtable->batches[i].inner_tuples);
3395 : 1077 : sts_end_write(hashtable->batches[i].outer_tuples);
3396 : 1077 : sts_end_parallel_scan(hashtable->batches[i].inner_tuples);
3397 : 1077 : sts_end_parallel_scan(hashtable->batches[i].outer_tuples);
3398 : : }
3399 : : }
3400 : :
3401 : : /* If we're last to detach, clean up shared memory. */
390 tmunro@postgresql.or 3402 [ + + ]: 198 : if (BarrierArriveAndDetach(&pstate->build_barrier))
3403 : : {
3404 : : /*
3405 : : * Late joining processes will see this state and give up
3406 : : * immediately.
3407 : : */
388 3408 [ - + ]: 84 : Assert(BarrierPhase(&pstate->build_barrier) == PHJ_BUILD_FREE);
3409 : :
2307 andres@anarazel.de 3410 [ + - ]: 84 : if (DsaPointerIsValid(pstate->batches))
3411 : : {
3412 : 84 : dsa_free(hashtable->area, pstate->batches);
3413 : 84 : pstate->batches = InvalidDsaPointer;
3414 : : }
3415 : : }
3416 : : }
390 tmunro@postgresql.or 3417 : 9826 : hashtable->parallel_state = NULL;
2307 andres@anarazel.de 3418 : 9826 : }
3419 : :
3420 : : /*
3421 : : * Get the first tuple in a given bucket identified by number.
3422 : : */
3423 : : static inline HashJoinTuple
3424 : 1387215 : ExecParallelHashFirstTuple(HashJoinTable hashtable, int bucketno)
3425 : : {
3426 : : HashJoinTuple tuple;
3427 : : dsa_pointer p;
3428 : :
3429 [ - + ]: 1387215 : Assert(hashtable->parallel_state);
3430 : 1387215 : p = dsa_pointer_atomic_read(&hashtable->buckets.shared[bucketno]);
3431 : 1387215 : tuple = (HashJoinTuple) dsa_get_address(hashtable->area, p);
3432 : :
3433 : 1387215 : return tuple;
3434 : : }
3435 : :
3436 : : /*
3437 : : * Get the next tuple in the same bucket as 'tuple'.
3438 : : */
3439 : : static inline HashJoinTuple
3440 : 1905385 : ExecParallelHashNextTuple(HashJoinTable hashtable, HashJoinTuple tuple)
3441 : : {
3442 : : HashJoinTuple next;
3443 : :
3444 [ - + ]: 1905385 : Assert(hashtable->parallel_state);
3445 : 1905385 : next = (HashJoinTuple) dsa_get_address(hashtable->area, tuple->next.shared);
3446 : :
3447 : 1905385 : return next;
3448 : : }
3449 : :
3450 : : /*
3451 : : * Insert a tuple at the front of a chain of tuples in DSA memory atomically.
3452 : : */
3453 : : static inline void
3454 : 1464354 : ExecParallelHashPushTuple(dsa_pointer_atomic *head,
3455 : : HashJoinTuple tuple,
3456 : : dsa_pointer tuple_shared)
3457 : : {
3458 : : for (;;)
3459 : : {
3460 : 1469452 : tuple->next.shared = dsa_pointer_atomic_read(head);
3461 [ + + ]: 1469452 : if (dsa_pointer_atomic_compare_exchange(head,
3462 : 1469452 : &tuple->next.shared,
3463 : : tuple_shared))
3464 : 1464354 : break;
3465 : : }
3466 : 1464354 : }
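
/*
 * Illustrative sketch (not part of nodeHash.c): pushing a node onto the
 * front of a shared list with a compare-and-swap retry loop, the same
 * pattern ExecParallelHashPushTuple() uses on the dsa_pointer_atomic bucket
 * heads.  This standalone version uses ordinary pointers and C11 atomics
 * instead of DSA pointers.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct ToyNode
{
	struct ToyNode *next;
	int			value;
} ToyNode;

static _Atomic(ToyNode *) head = NULL;

static void
toy_push(ToyNode *node)
{
	ToyNode    *old = atomic_load(&head);

	/*
	 * Keep retrying until no other thread changed the head between our read
	 * and our compare-exchange; on failure, 'old' is refreshed with the
	 * current head value.
	 */
	do
	{
		node->next = old;
	} while (!atomic_compare_exchange_weak(&head, &old, node));
}

int
main(void)
{
	ToyNode		a = {NULL, 1};
	ToyNode		b = {NULL, 2};

	toy_push(&a);
	toy_push(&b);				/* list is now b -> a */
	printf("head value: %d\n", atomic_load(&head)->value);
	return 0;
}
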
3467 : :
3468 : : /*
3469 : : * Prepare to work on a given batch.
3470 : : */
3471 : : void
3472 : 1295 : ExecParallelHashTableSetCurrentBatch(HashJoinTable hashtable, int batchno)
3473 : : {
3474 [ - + ]: 1295 : Assert(hashtable->batches[batchno].shared->buckets != InvalidDsaPointer);
3475 : :
3476 : 1295 : hashtable->curbatch = batchno;
3477 : 1295 : hashtable->buckets.shared = (dsa_pointer_atomic *)
3478 : 1295 : dsa_get_address(hashtable->area,
3479 : 1295 : hashtable->batches[batchno].shared->buckets);
3480 : 1295 : hashtable->nbuckets = hashtable->parallel_state->nbuckets;
3481 : 1295 : hashtable->log2_nbuckets = my_log2(hashtable->nbuckets);
3482 : 1295 : hashtable->current_chunk = NULL;
3483 : 1295 : hashtable->current_chunk_shared = InvalidDsaPointer;
3484 : 1295 : hashtable->batches[batchno].at_least_one_chunk = false;
3485 : 1295 : }
3486 : :
3487 : : /*
3488 : : * Take the next available chunk from the queue of chunks being worked on in
3489 : : * parallel. Return NULL if there are none left. Otherwise return a pointer
3490 : : * to the chunk, and set *shared to the DSA pointer to the chunk.
3491 : : */
3492 : : static HashMemoryChunk
3493 : 602 : ExecParallelHashPopChunkQueue(HashJoinTable hashtable, dsa_pointer *shared)
3494 : : {
3495 : 602 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3496 : : HashMemoryChunk chunk;
3497 : :
3498 : 602 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3499 [ + + ]: 602 : if (DsaPointerIsValid(pstate->chunk_work_queue))
3500 : : {
3501 : 494 : *shared = pstate->chunk_work_queue;
3502 : : chunk = (HashMemoryChunk)
3503 : 494 : dsa_get_address(hashtable->area, *shared);
3504 : 494 : pstate->chunk_work_queue = chunk->next.shared;
3505 : : }
3506 : : else
3507 : 108 : chunk = NULL;
3508 : 602 : LWLockRelease(&pstate->lock);
3509 : :
3510 : 602 : return chunk;
3511 : : }
3512 : :
3513 : : /*
3514 : : * Increase the space preallocated in this backend for a given inner batch by
3515 : : * at least a given amount. This allows us to track whether a given batch
3516 : : * would fit in memory when loaded back in. Also increase the number of
3517 : : * batches or buckets if required.
3518 : : *
3519 : : * This maintains a running estimation of how much space will be taken when we
3520 : : * load the batch back into memory by simulating the way chunks will be handed
3521 : : * out to workers. It's not perfectly accurate because the tuples will be
3522 : : * packed into memory chunks differently by ExecParallelHashTupleAlloc(), but
3523 : : * it should be pretty close. It tends to overestimate by a fraction of a
3524 : : * chunk per worker since all workers gang up to preallocate during hashing,
3525 : : * but workers tend to reload batches alone if there are enough to go around,
3526 : : * leaving fewer partially filled chunks. This effect is bounded by
3527 : : * nparticipants.
3528 : : *
3529 : : * Return false if the number of batches or buckets has changed, and the
3530 : : * caller should reconsider which batch a given tuple now belongs in and call
3531 : : * again.
3532 : : */
3533 : : static bool
3534 : 1017 : ExecParallelHashTuplePrealloc(HashJoinTable hashtable, int batchno, size_t size)
3535 : : {
3536 : 1017 : ParallelHashJoinState *pstate = hashtable->parallel_state;
3537 : 1017 : ParallelHashJoinBatchAccessor *batch = &hashtable->batches[batchno];
3538 : 1017 : size_t want = Max(size, HASH_CHUNK_SIZE - HASH_CHUNK_HEADER_SIZE);
3539 : :
3540 [ - + ]: 1017 : Assert(batchno > 0);
3541 [ - + ]: 1017 : Assert(batchno < hashtable->nbatch);
2293 tgl@sss.pgh.pa.us 3542 [ - + ]: 1017 : Assert(size == MAXALIGN(size));
3543 : :
2307 andres@anarazel.de 3544 : 1017 : LWLockAcquire(&pstate->lock, LW_EXCLUSIVE);
3545 : :
3546 : : /* Has another participant commanded us to help grow? */
3547 [ + + ]: 1017 : if (pstate->growth == PHJ_GROWTH_NEED_MORE_BATCHES ||
3548 [ - + ]: 999 : pstate->growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3549 : : {
3550 : 18 : ParallelHashGrowth growth = pstate->growth;
3551 : :
3552 : 18 : LWLockRelease(&pstate->lock);
3553 [ + - ]: 18 : if (growth == PHJ_GROWTH_NEED_MORE_BATCHES)
3554 : 18 : ExecParallelHashIncreaseNumBatches(hashtable);
2307 andres@anarazel.de 3555 [ # # ]:UBC 0 : else if (growth == PHJ_GROWTH_NEED_MORE_BUCKETS)
3556 : 0 : ExecParallelHashIncreaseNumBuckets(hashtable);
3557 : :
2307 andres@anarazel.de 3558 :CBC 18 : return false;
3559 : : }
3560 : :
3561 [ + + ]: 999 : if (pstate->growth != PHJ_GROWTH_DISABLED &&
3562 [ + + ]: 882 : batch->at_least_one_chunk &&
2293 tgl@sss.pgh.pa.us 3563 : 144 : (batch->shared->estimated_size + want + HASH_CHUNK_HEADER_SIZE
3564 [ + + ]: 144 : > pstate->space_allowed))
3565 : : {
3566 : : /*
3567 : : * We have determined that this batch would exceed the space budget if
3568 : : * loaded into memory. Command all participants to help repartition.
3569 : : */
2307 andres@anarazel.de 3570 : 15 : batch->shared->space_exhausted = true;
3571 : 15 : pstate->growth = PHJ_GROWTH_NEED_MORE_BATCHES;
3572 : 15 : LWLockRelease(&pstate->lock);
3573 : :
3574 : 15 : return false;
3575 : : }
3576 : :
3577 : 984 : batch->at_least_one_chunk = true;
3578 : 984 : batch->shared->estimated_size += want + HASH_CHUNK_HEADER_SIZE;
3579 : 984 : batch->preallocated = want;
3580 : 984 : LWLockRelease(&pstate->lock);
3581 : :
3582 : 984 : return true;
3583 : : }
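
/*
 * Illustrative sketch (not part of nodeHash.c): the per-batch size
 * estimation performed above.  Space is reserved roughly a chunk at a time,
 * and once the running estimate for a batch would exceed the budget, the
 * caller is told to repartition instead of writing more tuples.  The
 * constants, struct and return convention are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_CHUNK_SIZE		(32 * 1024)
#define TOY_CHUNK_HEADER	64
#define Max(a,b)			(((a) > (b)) ? (a) : (b))

typedef struct ToyBatch
{
	size_t		estimated_size; /* estimated in-memory size when reloaded */
	size_t		preallocated;	/* bytes reserved in this backend */
	bool		at_least_one_chunk;
} ToyBatch;

/* Returns false when the caller should help repartition instead. */
static bool
toy_prealloc(ToyBatch *batch, size_t size, size_t space_allowed)
{
	size_t		want = Max(size, TOY_CHUNK_SIZE - TOY_CHUNK_HEADER);

	if (batch->at_least_one_chunk &&
		batch->estimated_size + want + TOY_CHUNK_HEADER > space_allowed)
		return false;			/* budget exceeded: grow nbatch */

	batch->at_least_one_chunk = true;
	batch->estimated_size += want + TOY_CHUNK_HEADER;
	batch->preallocated = want;
	return true;
}

int
main(void)
{
	ToyBatch	batch = {0, 0, false};
	size_t		budget = 64 * 1024;
	int			i;

	for (i = 0; i < 4; i++)
		printf("prealloc %d: %s\n", i,
			   toy_prealloc(&batch, 100, budget) ? "ok" : "repartition");
	return 0;
}
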
3584 : :
3585 : : /*
3586 : : * Calculate the limit on how much memory can be used by Hash and similar
3587 : : * plan types. This is work_mem times hash_mem_multiplier, and is
3588 : : * expressed in bytes.
3589 : : *
3590 : : * Exported for use by the planner, as well as other hash-like executor
3591 : : * nodes. This is a rather random place for this, but there is no better
3592 : : * place.
3593 : : */
3594 : : size_t
994 tgl@sss.pgh.pa.us 3595 : 546043 : get_hash_memory_limit(void)
3596 : : {
3597 : : double mem_limit;
3598 : :
3599 : : /* Do initial calculation in double arithmetic */
3600 : 546043 : mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
3601 : :
3602 : : /* Clamp in case it doesn't fit in size_t */
3603 [ + - ]: 546043 : mem_limit = Min(mem_limit, (double) SIZE_MAX);
3604 : :
3605 : 546043 : return (size_t) mem_limit;
3606 : : }
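
/*
 * Illustrative sketch (not part of nodeHash.c): the limit above is work_mem
 * (a kilobyte-valued GUC) times hash_mem_multiplier, converted to bytes and
 * clamped so it fits in size_t.  The argument values in main() are made-up
 * examples.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t
toy_hash_memory_limit(int work_mem_kb, double hash_mem_multiplier)
{
	double		mem_limit = (double) work_mem_kb * hash_mem_multiplier * 1024.0;

	/* Clamp in case it doesn't fit in size_t. */
	if (mem_limit > (double) SIZE_MAX)
		mem_limit = (double) SIZE_MAX;
	return (size_t) mem_limit;
}

int
main(void)
{
	/* e.g. work_mem = 4MB with a hash_mem_multiplier of 2.0 */
	printf("hash memory limit: %zu bytes\n",
		   toy_hash_memory_limit(4096, 2.0));
	return 0;
}
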