/*-------------------------------------------------------------------------
 *
 * nodeMemoize.c
 *      Routines to handle caching of results from parameterized nodes
 *
 * Portions Copyright (c) 2021-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *      src/backend/executor/nodeMemoize.c
 *
 * Memoize nodes are intended to sit above parameterized nodes in the plan
 * tree in order to cache results from them.  The intention here is that a
 * repeat scan with a parameter value that has already been seen by the node
 * can fetch tuples from the cache rather than having to re-scan the outer
 * node all over again.  The query planner may choose to make use of one of
 * these when it thinks rescans for previously seen values are likely enough
 * to warrant adding the additional node.
 *
 * The cache itself is a hash table.  When the cache fills, we never spill
 * tuples to disk; instead, we evict the least recently used cache entry
 * from the cache.  We track recency by always pushing new entries, and
 * entries we look up, onto the tail of a doubly linked list.  This means
 * that older items always bubble up to the head of this LRU list, which is
 * where eviction begins.
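 *
 * For example (illustrative only): after looking up parameter values A,
 * then B, then A again, the LRU list reads head -> B, A <- tail, so B is
 * the first candidate for eviction even though it was cached before the
 * most recent lookup of A.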
 *
 * Sometimes our callers won't run their scans to completion.  For example,
 * a semi-join only needs to run until it finds a matching tuple, and once
 * it does, the join operator skips to the next outer tuple and does not
 * execute the inner side again for that scan.  Because of this, we must
 * keep track of when a cache entry is complete; by default, we know it is
 * when we run out of tuples to read during the scan.  However, there are
 * cases where we can mark the cache entry as complete without exhausting
 * the scan of all tuples.  One case is unique joins, where the join
 * operator knows that there will be at most one match for any given outer
 * tuple.  In order to support such cases we allow the "singlerow" option
 * to be set for the cache.  This option marks the cache entry as complete
 * after we read the first tuple from the subnode.
 *
 * It's possible when we're filling the cache for a given set of parameters
 * that we're unable to free enough memory to store any more tuples.  If
 * this happens then we'll have already evicted all other cache entries.
 * When caching another tuple would cause us to exceed our memory budget,
 * we must free the entry that we're currently populating and move the
 * state machine into MEMO_CACHE_BYPASS_MODE.  This means that we'll not
 * attempt to cache any further tuples for this particular scan; we don't
 * have the memory for it.  The state machine will be reset again on the
 * next rescan.  If the memory requirements to cache the next parameter's
 * tuples are less demanding, then that may allow us to start putting
 * useful entries back into the cache again.
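 *
 * As a rough illustration (a sketch, not output captured from any
 * particular PostgreSQL version; t1, t2 and t2_y_idx are hypothetical
 * names), a plan using this node might look like:
 *
 *      Nested Loop
 *          ->  Seq Scan on t1
 *          ->  Memoize
 *                Cache Key: t1.x
 *                ->  Index Scan using t2_y_idx on t2
 *                      Index Cond: (y = t1.x)
 *
 * Here the Memoize node returns cached inner-side rows whenever a t1.x
 * value repeats, and only executes the index scan for values it has not
 * yet cached.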
 *
 *
 * INTERFACE ROUTINES
 *      ExecMemoize         - lookup cache, exec subplan when not found
 *      ExecInitMemoize     - initialize node and subnodes
 *      ExecEndMemoize      - shutdown node and subnodes
 *      ExecReScanMemoize   - rescan the memoize node
 *
 *      ExecMemoizeEstimate       estimates DSM space needed for parallel plan
 *      ExecMemoizeInitializeDSM  initialize DSM for parallel plan
 *      ExecMemoizeInitializeWorker  attach to DSM info in parallel worker
 *      ExecMemoizeRetrieveInstrumentation  get instrumentation from worker
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "common/hashfn.h"
#include "executor/executor.h"
#include "executor/nodeMemoize.h"
#include "lib/ilist.h"
#include "miscadmin.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"

/* States of the ExecMemoize state machine */
#define MEMO_CACHE_LOOKUP           1   /* Attempt to perform a cache lookup */
#define MEMO_CACHE_FETCH_NEXT_TUPLE 2   /* Get another tuple from the cache */
#define MEMO_FILLING_CACHE          3   /* Read outer node to fill cache */
#define MEMO_CACHE_BYPASS_MODE      4   /* Bypass mode.  Just read from our
                                         * subplan without caching anything */
#define MEMO_END_OF_SCAN            5   /* Ready for rescan */
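
/*
 * A sketch of the state transitions, as implemented by ExecMemoize() below
 * (each scan starts in MEMO_CACHE_LOOKUP; ExecReScanMemoize() resets to it):
 *
 *  MEMO_CACHE_LOOKUP --(hit, entry complete)--> MEMO_CACHE_FETCH_NEXT_TUPLE
 *  MEMO_CACHE_LOOKUP --(miss, tuple cached)---> MEMO_FILLING_CACHE
 *  MEMO_CACHE_LOOKUP --(can't cache tuple)----> MEMO_CACHE_BYPASS_MODE
 *  MEMO_FILLING_CACHE --(out of memory)-------> MEMO_CACHE_BYPASS_MODE
 *  any state --(subplan or cache exhausted)---> MEMO_END_OF_SCAN
 */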


/* Helper macros for memory accounting */
#define EMPTY_ENTRY_MEMORY_BYTES(e) (sizeof(MemoizeEntry) + \
                                     sizeof(MemoizeKey) + \
                                     (e)->key->params->t_len)
#define CACHE_TUPLE_BYTES(t)    (sizeof(MemoizeTuple) + \
                                 (t)->mintuple->t_len)
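
/*
 * A worked example of the accounting (byte counts are illustrative, not
 * real sizeof() values): an entry whose key MinimalTuple is 24 bytes and
 * which caches two tuples of 40 bytes each is charged
 *
 *      EMPTY_ENTRY_MEMORY_BYTES(e) = sizeof(MemoizeEntry) +
 *                                    sizeof(MemoizeKey) + 24
 *      plus 2 * (sizeof(MemoizeTuple) + 40)    -- one CACHE_TUPLE_BYTES()
 *                                                 per cached tuple
 *
 * against MemoizeState's mem_used, which is compared with mem_limit.
 */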

/*
 * MemoizeTuple
 *      Stores an individually cached tuple
 */
typedef struct MemoizeTuple
{
    MinimalTuple mintuple;      /* Cached tuple */
    struct MemoizeTuple *next;  /* The next tuple with the same parameter
                                 * values or NULL if it's the last one */
} MemoizeTuple;

/*
 * MemoizeKey
 *      The hash table key for cached entries plus the LRU list link
 */
typedef struct MemoizeKey
{
    MinimalTuple params;
    dlist_node  lru_node;       /* Pointer to next/prev key in LRU list */
} MemoizeKey;

/*
 * MemoizeEntry
 *      The data struct that the cache hash table stores
 */
typedef struct MemoizeEntry
{
    MemoizeKey *key;            /* Hash key for hash table lookups */
    MemoizeTuple *tuplehead;    /* Pointer to the first tuple or NULL if no
                                 * tuples are cached for this entry */
    uint32      hash;           /* Hash value (cached) */
    char        status;         /* Hash status */
    bool        complete;       /* Did we read the outer plan to completion? */
} MemoizeEntry;


#define SH_PREFIX memoize
#define SH_ELEMENT_TYPE MemoizeEntry
#define SH_KEY_TYPE MemoizeKey *
#define SH_SCOPE static inline
#define SH_DECLARE
#include "lib/simplehash.h"

static uint32 MemoizeHash_hash(struct memoize_hash *tb,
                               const MemoizeKey *key);
static bool MemoizeHash_equal(struct memoize_hash *tb,
                              const MemoizeKey *key1,
                              const MemoizeKey *key2);

#define SH_PREFIX memoize
#define SH_ELEMENT_TYPE MemoizeEntry
#define SH_KEY_TYPE MemoizeKey *
#define SH_KEY key
#define SH_HASH_KEY(tb, key) MemoizeHash_hash(tb, key)
#define SH_EQUAL(tb, a, b) MemoizeHash_equal(tb, a, b)
#define SH_SCOPE static inline
#define SH_STORE_HASH
#define SH_GET_HASH(tb, a) a->hash
#define SH_DEFINE
#include "lib/simplehash.h"
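
/*
 * The two simplehash.h inclusions above generate a static hash table
 * implementation with the "memoize" prefix.  A minimal sketch of the
 * generated API as this file uses it (error handling omitted):
 *
 *      struct memoize_hash *ht = memoize_create(ctx, 1024, private_data);
 *      bool found;
 *      MemoizeEntry *entry = memoize_insert(ht, NULL, &found);
 *      entry = memoize_lookup(ht, NULL);   // probeslot must be set first
 *      memoize_delete_item(ht, entry);
 *
 * The NULL keys are fine here because SH_HASH_KEY and SH_EQUAL route to
 * MemoizeHash_hash() and MemoizeHash_equal(), which read the probeslot
 * rather than the key argument.
 */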

/*
 * MemoizeHash_hash
 *      Hash function for simplehash hashtable.  'key' is unused here as we
 *      require that all table lookups first populate the MemoizeState's
 *      probeslot with the key values to be looked up.
 */
static uint32
MemoizeHash_hash(struct memoize_hash *tb, const MemoizeKey *key)
{
    MemoizeState *mstate = (MemoizeState *) tb->private_data;
    TupleTableSlot *pslot = mstate->probeslot;
    uint32      hashkey = 0;
    int         numkeys = mstate->nkeys;

    if (mstate->binary_mode)
    {
        for (int i = 0; i < numkeys; i++)
        {
            /* combine successive hashkeys by rotating */
            hashkey = pg_rotate_left32(hashkey, 1);

            if (!pslot->tts_isnull[i])  /* treat nulls as having hash key 0 */
            {
                FormData_pg_attribute *attr;
                uint32      hkey;

                attr = &pslot->tts_tupleDescriptor->attrs[i];

                hkey = datum_image_hash(pslot->tts_values[i], attr->attbyval, attr->attlen);

                hashkey ^= hkey;
            }
        }
    }
    else
    {
        FmgrInfo   *hashfunctions = mstate->hashfunctions;
        Oid        *collations = mstate->collations;

        for (int i = 0; i < numkeys; i++)
        {
            /* combine successive hashkeys by rotating */
            hashkey = pg_rotate_left32(hashkey, 1);

            if (!pslot->tts_isnull[i])  /* treat nulls as having hash key 0 */
            {
                uint32      hkey;

                hkey = DatumGetUInt32(FunctionCall1Coll(&hashfunctions[i],
                                                        collations[i], pslot->tts_values[i]));
                hashkey ^= hkey;
            }
        }
    }

    return murmurhash32(hashkey);
}
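
/*
 * Both MemoizeHash_hash() and MemoizeHash_equal() implement a convention
 * rather than a conventional keyed lookup: the caller loads the values to
 * be probed into mstate->probeslot first, then calls into the hash table
 * with a NULL key.  A condensed sketch of the pattern, as used by
 * cache_lookup() and cache_reduce_memory() below:
 *
 *      prepare_probe_slot(mstate, NULL);   // or pass a specific MemoizeKey
 *      entry = memoize_lookup(mstate->hashtable, NULL);
 *
 * Skipping prepare_probe_slot() would make the hash and equality functions
 * operate on stale probe values, so every lookup site must follow this
 * two-step sequence.
 */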

/*
 * MemoizeHash_equal
 *      Equality function for confirming hash value matches during a hash
 *      table lookup.  'key2' is never used.  Instead the MemoizeState's
 *      probeslot is always populated with details of what's being looked up.
 */
static bool
MemoizeHash_equal(struct memoize_hash *tb, const MemoizeKey *key1,
                  const MemoizeKey *key2)
{
    MemoizeState *mstate = (MemoizeState *) tb->private_data;
    ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
    TupleTableSlot *tslot = mstate->tableslot;
    TupleTableSlot *pslot = mstate->probeslot;

    /* probeslot should have already been prepared by prepare_probe_slot() */
    ExecStoreMinimalTuple(key1->params, tslot, false);

    if (mstate->binary_mode)
    {
        int         numkeys = mstate->nkeys;

        slot_getallattrs(tslot);
        slot_getallattrs(pslot);

        for (int i = 0; i < numkeys; i++)
        {
            FormData_pg_attribute *attr;

            if (tslot->tts_isnull[i] != pslot->tts_isnull[i])
                return false;

            /* both NULL? they're equal */
            if (tslot->tts_isnull[i])
                continue;

            /* perform binary comparison on the two datums */
            attr = &tslot->tts_tupleDescriptor->attrs[i];
            if (!datum_image_eq(tslot->tts_values[i], pslot->tts_values[i],
                                attr->attbyval, attr->attlen))
                return false;
        }
        return true;
    }
    else
    {
        econtext->ecxt_innertuple = tslot;
        econtext->ecxt_outertuple = pslot;
        return ExecQualAndReset(mstate->cache_eq_expr, econtext);
    }
}

/*
 * Initialize the hash table to empty.
 */
static void
build_hash_table(MemoizeState *mstate, uint32 size)
{
    /* Make a guess at a good size when we're not given a valid size. */
    if (size == 0)
        size = 1024;

    /* memoize_create will convert the size to a power of 2 */
    mstate->hashtable = memoize_create(mstate->tableContext, size, mstate);
}

/*
 * prepare_probe_slot
 *      Populate mstate's probeslot with the values from the tuple stored
 *      in 'key'.  If 'key' is NULL, then perform the population by
 *      evaluating mstate's param_exprs.
 */
static inline void
prepare_probe_slot(MemoizeState *mstate, MemoizeKey *key)
{
    TupleTableSlot *pslot = mstate->probeslot;
    TupleTableSlot *tslot = mstate->tableslot;
    int         numKeys = mstate->nkeys;

    ExecClearTuple(pslot);

    if (key == NULL)
    {
        ExprContext *econtext = mstate->ss.ps.ps_ExprContext;
        MemoryContext oldcontext;

        oldcontext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);

        /* Set the probeslot's values based on the current parameter values */
        for (int i = 0; i < numKeys; i++)
            pslot->tts_values[i] = ExecEvalExpr(mstate->param_exprs[i],
                                                econtext,
                                                &pslot->tts_isnull[i]);

        MemoryContextSwitchTo(oldcontext);
    }
    else
    {
        /* Process the key's MinimalTuple and store the values in probeslot */
        ExecStoreMinimalTuple(key->params, tslot, false);
        slot_getallattrs(tslot);
        memcpy(pslot->tts_values, tslot->tts_values, sizeof(Datum) * numKeys);
        memcpy(pslot->tts_isnull, tslot->tts_isnull, sizeof(bool) * numKeys);
    }

    ExecStoreVirtualTuple(pslot);
}

/*
 * entry_purge_tuples
 *      Remove all tuples from the cache entry pointed to by 'entry'.  This
 *      leaves an empty cache entry.  Also, update the memory accounting to
 *      reflect the removal of the tuples.
 */
static inline void
entry_purge_tuples(MemoizeState *mstate, MemoizeEntry *entry)
{
    MemoizeTuple *tuple = entry->tuplehead;
    uint64      freed_mem = 0;

    while (tuple != NULL)
    {
        MemoizeTuple *next = tuple->next;

        freed_mem += CACHE_TUPLE_BYTES(tuple);

        /* Free memory used for this tuple */
        pfree(tuple->mintuple);
        pfree(tuple);

        tuple = next;
    }

    entry->complete = false;
    entry->tuplehead = NULL;

    /* Update the memory accounting */
    mstate->mem_used -= freed_mem;
}

/*
 * remove_cache_entry
 *      Remove 'entry' from the cache and free memory used by it.
 */
static void
remove_cache_entry(MemoizeState *mstate, MemoizeEntry *entry)
{
    MemoizeKey *key = entry->key;

    dlist_delete(&entry->key->lru_node);

    /* Remove all of the tuples from this entry */
    entry_purge_tuples(mstate, entry);

    /*
     * Update memory accounting.  entry_purge_tuples should have already
     * subtracted the memory used for each cached tuple.  Here we just update
     * the amount used by the entry itself.
     */
    mstate->mem_used -= EMPTY_ENTRY_MEMORY_BYTES(entry);

    /* Remove the entry from the cache */
    memoize_delete_item(mstate->hashtable, entry);

    pfree(key->params);
    pfree(key);
}

/*
 * cache_purge_all
 *      Remove all items from the cache
 */
static void
cache_purge_all(MemoizeState *mstate)
{
    uint64      evictions = mstate->hashtable->members;
    PlanState  *pstate = (PlanState *) mstate;

    /*
     * Likely the most efficient way to remove all items is to just reset the
     * memory context for the cache and then rebuild a fresh hash table.  This
     * saves having to remove each item one by one and pfree each cached
     * tuple.
     */
    MemoryContextReset(mstate->tableContext);

    /* Make the hash table the same size as the original size */
    build_hash_table(mstate, ((Memoize *) pstate->plan)->est_entries);

    /* reset the LRU list */
    dlist_init(&mstate->lru_list);
    mstate->last_tuple = NULL;
    mstate->entry = NULL;

    mstate->mem_used = 0;

    /* XXX should we add something new to track these purges? */
    mstate->stats.cache_evictions += evictions; /* Update Stats */
}

/*
 * cache_reduce_memory
 *      Evict older and less recently used items from the cache in order to
 *      reduce the memory consumption back to something below the
 *      MemoizeState's mem_limit.
 *
 * 'specialkey', if not NULL, causes the function to return false if the entry
 * which the key belongs to is removed from the cache.
 */
static bool
cache_reduce_memory(MemoizeState *mstate, MemoizeKey *specialkey)
{
    bool        specialkey_intact = true;   /* for now */
    dlist_mutable_iter iter;
    uint64      evictions = 0;

    /* Update peak memory usage */
    if (mstate->mem_used > mstate->stats.mem_peak)
        mstate->stats.mem_peak = mstate->mem_used;

    /* We expect only to be called when we've gone over budget on memory */
    Assert(mstate->mem_used > mstate->mem_limit);

    /* Start the eviction process starting at the head of the LRU list. */
    dlist_foreach_modify(iter, &mstate->lru_list)
    {
        MemoizeKey *key = dlist_container(MemoizeKey, lru_node, iter.cur);
        MemoizeEntry *entry;

        /*
         * Populate the hash probe slot in preparation for looking up this
         * LRU entry.
         */
        prepare_probe_slot(mstate, key);

        /*
         * Ideally the LRU list pointers would be stored in the entry itself
         * rather than in the key.  Unfortunately, we can't do that as the
         * simplehash.h code may resize the table and allocate new memory for
         * entries which would result in those pointers pointing to the old
         * buckets.  However, it's fine to use the key to store this as that's
         * only referenced by a pointer in the entry, which of course follows
         * the entry whenever the hash table is resized.  Since we only have a
         * pointer to the key here, we must perform a hash table lookup to
         * find the entry that the key belongs to.
         */
        entry = memoize_lookup(mstate->hashtable, NULL);

        /*
         * Sanity check that we found the entry belonging to the LRU list
         * item.  A misbehaving hash or equality function could cause the
         * entry not to be found or the wrong entry to be found.
         */
        if (unlikely(entry == NULL || entry->key != key))
            elog(ERROR, "could not find memoization table entry");

        /*
         * If we're being called to free memory while the cache is being
         * populated with new tuples, then we'd better take some care as we
         * could end up freeing the entry which 'specialkey' belongs to.
         * Generally callers will pass 'specialkey' as the key for the cache
         * entry which is currently being populated, so we must set
         * 'specialkey_intact' to false to inform the caller the specialkey
         * entry has been removed.
         */
        if (key == specialkey)
            specialkey_intact = false;

        /*
         * Finally remove the entry.  This will remove from the LRU list too.
         */
        remove_cache_entry(mstate, entry);

        evictions++;

        /* Exit if we've freed enough memory */
        if (mstate->mem_used <= mstate->mem_limit)
            break;
    }

    mstate->stats.cache_evictions += evictions; /* Update Stats */

    return specialkey_intact;
}

/*
 * cache_lookup
 *      Perform a lookup to see if we've already cached tuples based on the
 *      scan's current parameters.  If we find an existing entry we move it
 *      to the end of the LRU list, set *found to true then return it.  If
 *      we don't find an entry then we create a new one and add it to the
 *      end of the LRU list.  We also update cache memory accounting and
 *      remove older entries if we go over the memory budget.  If we managed
 *      to free enough memory we return the new entry, else we return NULL.
 *
 * Callers can assume we'll never return NULL when *found is true.
 */
static MemoizeEntry *
cache_lookup(MemoizeState *mstate, bool *found)
{
    MemoizeKey *key;
    MemoizeEntry *entry;
    MemoryContext oldcontext;

    /* prepare the probe slot with the current scan parameters */
    prepare_probe_slot(mstate, NULL);

    /*
     * Add the new entry to the cache.  No need to pass a valid key since the
     * hash function uses mstate's probeslot, which we populated above.
     */
    entry = memoize_insert(mstate->hashtable, NULL, found);

    if (*found)
    {
        /*
         * Move existing entry to the tail of the LRU list to mark it as the
         * most recently used item.
         */
        dlist_move_tail(&mstate->lru_list, &entry->key->lru_node);

        return entry;
    }

    oldcontext = MemoryContextSwitchTo(mstate->tableContext);

    /* Allocate a new key */
    entry->key = key = (MemoizeKey *) palloc(sizeof(MemoizeKey));
    key->params = ExecCopySlotMinimalTuple(mstate->probeslot);

    /* Update the total cache memory utilization */
    mstate->mem_used += EMPTY_ENTRY_MEMORY_BYTES(entry);

    /* Initialize this entry */
    entry->complete = false;
    entry->tuplehead = NULL;

    /*
     * Since this is the most recently used entry, push this entry onto the
     * end of the LRU list.
     */
    dlist_push_tail(&mstate->lru_list, &entry->key->lru_node);

    mstate->last_tuple = NULL;

    MemoryContextSwitchTo(oldcontext);

    /*
     * If we've gone over our memory budget, then we'll free up some space in
     * the cache.
     */
    if (mstate->mem_used > mstate->mem_limit)
    {
        /*
         * Try to free up some memory.  It's highly unlikely that we'll fail
         * to do so here since the entry we've just added is yet to contain
         * any tuples and we're able to remove any other entry to reduce the
         * memory consumption.
         */
        if (unlikely(!cache_reduce_memory(mstate, key)))
            return NULL;

        /*
         * The process of removing entries from the cache may have caused the
         * code in simplehash.h to shuffle elements to earlier buckets in the
         * hash table.  If it has, we'll need to find the entry again by
         * performing a lookup.  Fortunately, we can detect if this has
         * happened by seeing if the entry is still in use and that the key
         * pointer matches our expected key.
         */
        if (entry->status != memoize_SH_IN_USE || entry->key != key)
        {
            /*
             * We need to repopulate the probeslot as lookups performed during
             * the cache evictions above will have stored some other key.
             */
            prepare_probe_slot(mstate, key);

            /* Re-find the newly added entry */
            entry = memoize_lookup(mstate->hashtable, NULL);
            Assert(entry != NULL);
        }
    }

    return entry;
}

/*
 * cache_store_tuple
 *      Add the tuple stored in 'slot' to the mstate's current cache entry.
 *      The cache entry must have already been made with cache_lookup().
 *      mstate's last_tuple field must point to the tail of mstate->entry's
 *      list of tuples.
 */
static bool
cache_store_tuple(MemoizeState *mstate, TupleTableSlot *slot)
{
    MemoizeTuple *tuple;
    MemoizeEntry *entry = mstate->entry;
    MemoryContext oldcontext;

    Assert(slot != NULL);
    Assert(entry != NULL);

    oldcontext = MemoryContextSwitchTo(mstate->tableContext);

    tuple = (MemoizeTuple *) palloc(sizeof(MemoizeTuple));
    tuple->mintuple = ExecCopySlotMinimalTuple(slot);
    tuple->next = NULL;

    /* Account for the memory we just consumed */
    mstate->mem_used += CACHE_TUPLE_BYTES(tuple);

    if (entry->tuplehead == NULL)
    {
        /*
         * This is the first tuple for this entry, so just point the list
         * head to it.
         */
        entry->tuplehead = tuple;
    }
    else
    {
        /* push this tuple onto the tail of the list */
        mstate->last_tuple->next = tuple;
    }

    mstate->last_tuple = tuple;
    MemoryContextSwitchTo(oldcontext);

    /*
     * If we've gone over our memory budget then free up some space in the
     * cache.
     */
    if (mstate->mem_used > mstate->mem_limit)
    {
        MemoizeKey *key = entry->key;

        if (!cache_reduce_memory(mstate, key))
            return false;

        /*
         * The process of removing entries from the cache may have caused the
         * code in simplehash.h to shuffle elements to earlier buckets in the
         * hash table.  If it has, we'll need to find the entry again by
         * performing a lookup.  Fortunately, we can detect if this has
         * happened by seeing if the entry is still in use and that the key
         * pointer matches our expected key.
         */
        if (entry->status != memoize_SH_IN_USE || entry->key != key)
        {
            /*
             * We need to repopulate the probeslot as lookups performed during
             * the cache evictions above will have stored some other key.
             */
            prepare_probe_slot(mstate, key);

            /* Re-find the entry */
            mstate->entry = entry = memoize_lookup(mstate->hashtable, NULL);
            Assert(entry != NULL);
        }
    }

    return true;
}

static TupleTableSlot *
ExecMemoize(PlanState *pstate)
{
    MemoizeState *node = castNode(MemoizeState, pstate);
    PlanState  *outerNode;
    TupleTableSlot *slot;

    switch (node->mstatus)
    {
        case MEMO_CACHE_LOOKUP:
            {
                MemoizeEntry *entry;
                TupleTableSlot *outerslot;
                bool        found;

                Assert(node->entry == NULL);

                /*
                 * We're only ever in this state for the first call of the
                 * scan.  Here we have a look to see if we've already seen the
                 * current parameters before and if we have already cached a
                 * complete set of records that the outer plan will return for
                 * these parameters.
                 *
                 * When we find a valid cache entry, we'll return the first
                 * tuple from it.  If not found, we'll create a cache entry
                 * and then try to fetch a tuple from the outer scan.  If we
                 * find one there, we'll try to cache it.
                 */

                /* see if we've got anything cached for the current parameters */
                entry = cache_lookup(node, &found);

                if (found && entry->complete)
                {
                    node->stats.cache_hits += 1;    /* stats update */

                    /*
                     * Set last_tuple and entry so that the state
                     * MEMO_CACHE_FETCH_NEXT_TUPLE can easily find the next
                     * tuple for these parameters.
                     */
                    node->last_tuple = entry->tuplehead;
                    node->entry = entry;

                    /* Fetch the first cached tuple, if there is one */
                    if (entry->tuplehead)
                    {
                        node->mstatus = MEMO_CACHE_FETCH_NEXT_TUPLE;

                        slot = node->ss.ps.ps_ResultTupleSlot;
                        ExecStoreMinimalTuple(entry->tuplehead->mintuple,
                                              slot, false);

                        return slot;
                    }

                    /* The cache entry is void of any tuples. */
                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                /* Handle cache miss */
                node->stats.cache_misses += 1;  /* stats update */

                if (found)
                {
                    /*
                     * A cache entry was found, but the scan for that entry
                     * did not run to completion.  We'll just remove all
                     * tuples and start again.  It might be tempting to
                     * continue where we left off, but there's no guarantee
                     * the outer node will produce the tuples in the same
                     * order as it did last time.
                     */
                    entry_purge_tuples(node, entry);
                }

                /* Scan the outer node for a tuple to cache */
                outerNode = outerPlanState(node);
                outerslot = ExecProcNode(outerNode);
                if (TupIsNull(outerslot))
                {
                    /*
                     * cache_lookup may have returned NULL due to failure to
                     * free enough cache space, so ensure we don't do anything
                     * here that assumes it worked.  There's no need to go
                     * into bypass mode here as we're setting mstatus to end
                     * of scan.
                     */
                    if (likely(entry))
                        entry->complete = true;

                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                node->entry = entry;

                /*
                 * If we failed to create the entry or failed to store the
                 * tuple in the entry, then go into bypass mode.
                 */
                if (unlikely(entry == NULL ||
                             !cache_store_tuple(node, outerslot)))
                {
                    node->stats.cache_overflows += 1;   /* stats update */

                    node->mstatus = MEMO_CACHE_BYPASS_MODE;

                    /*
                     * No need to clear out last_tuple as we'll stay in bypass
                     * mode until the end of the scan.
                     */
                }
                else
                {
                    /*
                     * If we only expect a single row from this scan then we
                     * can mark that we're not expecting more.  This allows
                     * cache lookups to work even when the scan has not been
                     * executed to completion.
                     */
                    entry->complete = node->singlerow;
                    node->mstatus = MEMO_FILLING_CACHE;
                }

                slot = node->ss.ps.ps_ResultTupleSlot;
                ExecCopySlot(slot, outerslot);
                return slot;
            }

        case MEMO_CACHE_FETCH_NEXT_TUPLE:
            {
                /* We shouldn't be in this state if these are not set */
                Assert(node->entry != NULL);
                Assert(node->last_tuple != NULL);

                /* Skip to the next tuple to output */
                node->last_tuple = node->last_tuple->next;

                /* No more tuples in the cache */
                if (node->last_tuple == NULL)
                {
                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                slot = node->ss.ps.ps_ResultTupleSlot;
                ExecStoreMinimalTuple(node->last_tuple->mintuple, slot,
                                      false);

                return slot;
            }

        case MEMO_FILLING_CACHE:
            {
                TupleTableSlot *outerslot;
                MemoizeEntry *entry = node->entry;

                /* entry should already have been set by MEMO_CACHE_LOOKUP */
                Assert(entry != NULL);

                /*
                 * When in the MEMO_FILLING_CACHE state, we've just had a
                 * cache miss and are populating the cache with the current
                 * scan tuples.
                 */
                outerNode = outerPlanState(node);
                outerslot = ExecProcNode(outerNode);
                if (TupIsNull(outerslot))
                {
                    /* No more tuples.  Mark it as complete */
                    entry->complete = true;
                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                /*
                 * Validate if the planner properly set the singlerow flag.
                 * It should only set that if each cache entry can, at most,
                 * return 1 row.
                 */
                if (unlikely(entry->complete))
                    elog(ERROR, "cache entry already complete");

                /* Record the tuple in the current cache entry */
                if (unlikely(!cache_store_tuple(node, outerslot)))
                {
                    /* Couldn't store it?  Handle overflow */
                    node->stats.cache_overflows += 1;   /* stats update */

                    node->mstatus = MEMO_CACHE_BYPASS_MODE;

                    /*
                     * No need to clear out entry or last_tuple as we'll stay
                     * in bypass mode until the end of the scan.
                     */
                }

                slot = node->ss.ps.ps_ResultTupleSlot;
                ExecCopySlot(slot, outerslot);
                return slot;
            }

        case MEMO_CACHE_BYPASS_MODE:
            {
                TupleTableSlot *outerslot;

                /*
                 * When in bypass mode we just continue to read tuples without
                 * caching.  We need to wait until the next rescan before we
                 * can come out of this mode.
                 */
                outerNode = outerPlanState(node);
                outerslot = ExecProcNode(outerNode);
                if (TupIsNull(outerslot))
                {
                    node->mstatus = MEMO_END_OF_SCAN;
                    return NULL;
                }

                slot = node->ss.ps.ps_ResultTupleSlot;
                ExecCopySlot(slot, outerslot);
                return slot;
            }

        case MEMO_END_OF_SCAN:

            /*
             * We've already returned NULL for this scan, but just in case
             * something calls us again by mistake.
             */
            return NULL;

        default:
            elog(ERROR, "unrecognized memoize state: %d",
                 (int) node->mstatus);
            return NULL;
    }                           /* switch */
}

MemoizeState *
ExecInitMemoize(Memoize *node, EState *estate, int eflags)
{
    MemoizeState *mstate = makeNode(MemoizeState);
    Plan       *outerNode;
    int         i;
    int         nkeys;
    Oid        *eqfuncoids;

    /* check for unsupported flags */
    Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

    mstate->ss.ps.plan = (Plan *) node;
    mstate->ss.ps.state = estate;
    mstate->ss.ps.ExecProcNode = ExecMemoize;

    /*
     * Miscellaneous initialization
     *
     * create expression context for node
     */
    ExecAssignExprContext(estate, &mstate->ss.ps);

    outerNode = outerPlan(node);
    outerPlanState(mstate) = ExecInitNode(outerNode, estate, eflags);

    /*
     * Initialize return slot and type.  No need to initialize projection
     * info because this node doesn't do projections.
     */
    ExecInitResultTupleSlotTL(&mstate->ss.ps, &TTSOpsMinimalTuple);
    mstate->ss.ps.ps_ProjInfo = NULL;

    /*
     * Initialize scan slot and type.
     */
    ExecCreateScanSlotFromOuterPlan(estate, &mstate->ss, &TTSOpsMinimalTuple);

    /*
     * Set the state machine to lookup the cache.  We won't find anything
     * until we cache something, but this saves a special case to create the
     * first entry.
     */
    mstate->mstatus = MEMO_CACHE_LOOKUP;

    mstate->nkeys = nkeys = node->numKeys;
    mstate->hashkeydesc = ExecTypeFromExprList(node->param_exprs);
    mstate->tableslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
                                                 &TTSOpsMinimalTuple);
    mstate->probeslot = MakeSingleTupleTableSlot(mstate->hashkeydesc,
                                                 &TTSOpsVirtual);

    mstate->param_exprs = (ExprState **) palloc(nkeys * sizeof(ExprState *));
    mstate->collations = node->collations;  /* Just point directly to the
                                             * plan data */
    mstate->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));

    eqfuncoids = palloc(nkeys * sizeof(Oid));

    for (i = 0; i < nkeys; i++)
    {
        Oid         hashop = node->hashOperators[i];
        Oid         left_hashfn;
        Oid         right_hashfn;
        Expr       *param_expr = (Expr *) list_nth(node->param_exprs, i);

        if (!get_op_hash_functions(hashop, &left_hashfn, &right_hashfn))
            elog(ERROR, "could not find hash function for hash operator %u",
                 hashop);

        fmgr_info(left_hashfn, &mstate->hashfunctions[i]);

        mstate->param_exprs[i] = ExecInitExpr(param_expr, (PlanState *) mstate);
        eqfuncoids[i] = get_opcode(hashop);
    }

    mstate->cache_eq_expr = ExecBuildParamSetEqual(mstate->hashkeydesc,
                                                   &TTSOpsMinimalTuple,
                                                   &TTSOpsVirtual,
                                                   eqfuncoids,
                                                   node->collations,
                                                   node->param_exprs,
                                                   (PlanState *) mstate);

    pfree(eqfuncoids);
    mstate->mem_used = 0;

    /* Limit the total memory consumed by the cache to this */
    mstate->mem_limit = get_hash_memory_limit();

    /* A memory context dedicated for the cache */
    mstate->tableContext = AllocSetContextCreate(CurrentMemoryContext,
                                                 "MemoizeHashTable",
                                                 ALLOCSET_DEFAULT_SIZES);

    dlist_init(&mstate->lru_list);
    mstate->last_tuple = NULL;
    mstate->entry = NULL;

    /*
     * Mark if we can assume the cache entry is completed after we get the
     * first record for it.  Some callers might not call us again after
     * getting the first match.  e.g. A join operator performing a unique
     * join is able to skip to the next outer tuple after getting the first
     * matching inner tuple.  In this case, the cache entry is complete after
     * getting the first tuple.  This allows us to mark it as so.
     */
    mstate->singlerow = node->singlerow;
    mstate->keyparamids = node->keyparamids;

    /*
     * Record if the cache keys should be compared bit by bit, or logically
     * using the type's hash equality operator
     */
    mstate->binary_mode = node->binary_mode;

    /* Zero the statistics counters */
    memset(&mstate->stats, 0, sizeof(MemoizeInstrumentation));

    /* Allocate and set up the actual cache */
    build_hash_table(mstate, node->est_entries);

    return mstate;
}

void
ExecEndMemoize(MemoizeState *node)
{
#ifdef USE_ASSERT_CHECKING
    /* Validate the memory accounting code is correct in assert builds. */
    {
        int         count;
        uint64      mem = 0;
        memoize_iterator i;
        MemoizeEntry *entry;

        memoize_start_iterate(node->hashtable, &i);

        count = 0;
        while ((entry = memoize_iterate(node->hashtable, &i)) != NULL)
        {
            MemoizeTuple *tuple = entry->tuplehead;

            mem += EMPTY_ENTRY_MEMORY_BYTES(entry);
            while (tuple != NULL)
            {
                mem += CACHE_TUPLE_BYTES(tuple);
                tuple = tuple->next;
            }
            count++;
        }

        Assert(count == node->hashtable->members);
        Assert(mem == node->mem_used);
    }
#endif

    /*
     * When ending a parallel worker, copy the statistics gathered by the
     * worker back into shared memory so that it can be picked up by the main
     * process to report in EXPLAIN ANALYZE.
     */
    if (node->shared_info != NULL && IsParallelWorker())
    {
        MemoizeInstrumentation *si;

        /* Make mem_peak available for EXPLAIN */
        if (node->stats.mem_peak == 0)
            node->stats.mem_peak = node->mem_used;

        Assert(ParallelWorkerNumber <= node->shared_info->num_workers);
        si = &node->shared_info->sinstrument[ParallelWorkerNumber];
        memcpy(si, &node->stats, sizeof(MemoizeInstrumentation));
    }

    /* Remove the cache context */
    MemoryContextDelete(node->tableContext);

    ExecClearTuple(node->ss.ss_ScanTupleSlot);
    /* must drop pointer to cache result tuple */
    ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);

    /*
     * free exprcontext
     */
    ExecFreeExprContext(&node->ss.ps);

    /*
     * shut down the subplan
     */
    ExecEndNode(outerPlanState(node));
}

void
ExecReScanMemoize(MemoizeState *node)
{
    PlanState  *outerPlan = outerPlanState(node);

    /* Mark that we must lookup the cache for a new set of parameters */
    node->mstatus = MEMO_CACHE_LOOKUP;

    /* nullify pointers used for the last scan */
    node->entry = NULL;
    node->last_tuple = NULL;

    /*
     * If chgParam of the subnode is not null, then the plan will be
     * re-scanned by the first ExecProcNode.
     */
    if (outerPlan->chgParam == NULL)
        ExecReScan(outerPlan);

    /*
     * Purge the entire cache if a parameter changed that is not part of the
     * cache key.
     */
    if (bms_nonempty_difference(outerPlan->chgParam, node->keyparamids))
        cache_purge_all(node);
}

/*
 * ExecEstimateCacheEntryOverheadBytes
 *      For use in the query planner to help it estimate the amount of memory
 *      required to store a single entry in the cache.
 */
double
ExecEstimateCacheEntryOverheadBytes(double ntuples)
{
    return sizeof(MemoizeEntry) + sizeof(MemoizeKey) + sizeof(MemoizeTuple) *
        ntuples;
}
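
/*
 * For example, with an estimated 10 tuples per entry, this charges
 * sizeof(MemoizeEntry) + sizeof(MemoizeKey) + 10 * sizeof(MemoizeTuple)
 * bytes per cache entry.  Note that this covers only the per-entry
 * bookkeeping overhead; the bytes of the cached tuples themselves are not
 * included here.
 */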

/* ----------------------------------------------------------------
 *                      Parallel Query Support
 * ----------------------------------------------------------------
 */

/* ----------------------------------------------------------------
 *      ExecMemoizeEstimate
 *
 *      Estimate space required to propagate memoize statistics.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeEstimate(MemoizeState *node, ParallelContext *pcxt)
{
    Size        size;

    /* don't need this if not instrumenting or no workers */
    if (!node->ss.ps.instrument || pcxt->nworkers == 0)
        return;

    size = mul_size(pcxt->nworkers, sizeof(MemoizeInstrumentation));
    size = add_size(size, offsetof(SharedMemoizeInfo, sinstrument));
    shm_toc_estimate_chunk(&pcxt->estimator, size);
    shm_toc_estimate_keys(&pcxt->estimator, 1);
}

/* ----------------------------------------------------------------
 *      ExecMemoizeInitializeDSM
 *
 *      Initialize DSM space for memoize statistics.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeInitializeDSM(MemoizeState *node, ParallelContext *pcxt)
{
    Size        size;

    /* don't need this if not instrumenting or no workers */
    if (!node->ss.ps.instrument || pcxt->nworkers == 0)
        return;

    size = offsetof(SharedMemoizeInfo, sinstrument)
        + pcxt->nworkers * sizeof(MemoizeInstrumentation);
    node->shared_info = shm_toc_allocate(pcxt->toc, size);
    /* ensure any unfilled slots will contain zeroes */
    memset(node->shared_info, 0, size);
    node->shared_info->num_workers = pcxt->nworkers;
    shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
                   node->shared_info);
}

/* ----------------------------------------------------------------
 *      ExecMemoizeInitializeWorker
 *
 *      Attach worker to DSM space for memoize statistics.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeInitializeWorker(MemoizeState *node, ParallelWorkerContext *pwcxt)
{
    node->shared_info =
        shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, true);
}

/* ----------------------------------------------------------------
 *      ExecMemoizeRetrieveInstrumentation
 *
 *      Transfer memoize statistics from DSM to private memory.
 * ----------------------------------------------------------------
 */
void
ExecMemoizeRetrieveInstrumentation(MemoizeState *node)
{
    Size        size;
    SharedMemoizeInfo *si;

    if (node->shared_info == NULL)
        return;

    size = offsetof(SharedMemoizeInfo, sinstrument)
        + node->shared_info->num_workers * sizeof(MemoizeInstrumentation);
    si = palloc(size);
    memcpy(si, node->shared_info, size);
    node->shared_info = si;
}