/*-------------------------------------------------------------------------
 *
 * hash_xlog.c
 *    WAL replay logic for hash index.
 *
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/access/hash/hash_xlog.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/bufmask.h"
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "storage/procarray.h"

/*
 * replay a hash index meta page
 */
static void
hash_xlog_init_meta_page(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    Page        page;
    Buffer      metabuf;
    ForkNumber  forknum;

    xl_hash_init_meta_page *xlrec = (xl_hash_init_meta_page *) XLogRecGetData(record);

    /* create the index's metapage */
    metabuf = XLogInitBufferForRedo(record, 0);
    Assert(BufferIsValid(metabuf));
    _hash_init_metabuffer(metabuf, xlrec->num_tuples, xlrec->procid,
                          xlrec->ffactor, true);
    page = (Page) BufferGetPage(metabuf);
    PageSetLSN(page, lsn);
    MarkBufferDirty(metabuf);

    /*
     * Force the on-disk state of init forks to always be in sync with the
     * state in shared buffers.  See XLogReadBufferForRedoExtended.  We need
     * special handling for init forks as create index operations don't log a
     * full page image of the metapage.
     */
    XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
    if (forknum == INIT_FORKNUM)
        FlushOneBuffer(metabuf);

    /* all done */
    UnlockReleaseBuffer(metabuf);
}

/*
 * replay a hash index bitmap page
 */
static void
hash_xlog_init_bitmap_page(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    Buffer      bitmapbuf;
    Buffer      metabuf;
    Page        page;
    HashMetaPage metap;
    uint32      num_buckets;
    ForkNumber  forknum;

    xl_hash_init_bitmap_page *xlrec = (xl_hash_init_bitmap_page *) XLogRecGetData(record);

    /*
     * Initialize bitmap page
     */
    bitmapbuf = XLogInitBufferForRedo(record, 0);
    _hash_initbitmapbuffer(bitmapbuf, xlrec->bmsize, true);
    PageSetLSN(BufferGetPage(bitmapbuf), lsn);
    MarkBufferDirty(bitmapbuf);

    /*
     * Force the on-disk state of init forks to always be in sync with the
     * state in shared buffers.  See XLogReadBufferForRedoExtended.  We need
     * special handling for init forks as create index operations don't log a
     * full page image of the metapage.
     */
    XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
    if (forknum == INIT_FORKNUM)
        FlushOneBuffer(bitmapbuf);
    UnlockReleaseBuffer(bitmapbuf);

    /* add the new bitmap page to the metapage's list of bitmaps */
    if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
    {
        /*
         * Note: in normal operation, we'd update the metapage while still
         * holding lock on the bitmap page.  But during replay it's not
         * necessary to hold that lock, since nobody can see it yet; the
         * creating transaction hasn't yet committed.
         */
        page = BufferGetPage(metabuf);
        metap = HashPageGetMeta(page);

        num_buckets = metap->hashm_maxbucket + 1;
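
        /*
         * Block 0 of the index is the metapage and blocks 1..num_buckets
         * hold the primary bucket pages, so the just-created bitmap page
         * lives at block num_buckets + 1.
         */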
        metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
        metap->hashm_nmaps++;

        PageSetLSN(page, lsn);
        MarkBufferDirty(metabuf);

        XLogRecGetBlockTag(record, 1, NULL, &forknum, NULL);
        if (forknum == INIT_FORKNUM)
            FlushOneBuffer(metabuf);
    }
    if (BufferIsValid(metabuf))
        UnlockReleaseBuffer(metabuf);
}

/*
 * replay a hash index insert without split
 */
static void
hash_xlog_insert(XLogReaderState *record)
{
    HashMetaPage metap;
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_insert *xlrec = (xl_hash_insert *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        Size        datalen;
        char       *datapos = XLogRecGetBlockData(record, 0, &datalen);

        page = BufferGetPage(buffer);

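        /*
         * Re-insert the tuple at exactly the offset recorded at do time; if
         * that offset can't be used, the page is inconsistent with the WAL
         * record and we cannot continue.
         */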
        if (PageAddItem(page, (Item) datapos, datalen, xlrec->offnum,
                        false, false) == InvalidOffsetNumber)
            elog(PANIC, "hash_xlog_insert: failed to add item");

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    if (XLogReadBufferForRedo(record, 1, &buffer) == BLK_NEEDS_REDO)
    {
        /*
         * Note: in normal operation, we'd update the metapage while still
         * holding lock on the page we inserted into.  But during replay it's
         * not necessary to hold that lock, since no other index updates can
         * be happening concurrently.
         */
        page = BufferGetPage(buffer);
        metap = HashPageGetMeta(page);
        metap->hashm_ntuples += 1;

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

/*
 * replay addition of overflow page for hash index
 */
static void
hash_xlog_add_ovfl_page(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_add_ovfl_page *xlrec = (xl_hash_add_ovfl_page *) XLogRecGetData(record);
    Buffer      leftbuf;
    Buffer      ovflbuf;
    Buffer      metabuf;
    BlockNumber leftblk;
    BlockNumber rightblk;
    BlockNumber newmapblk = InvalidBlockNumber;
    Page        ovflpage;
    HashPageOpaque ovflopaque;
    uint32     *num_bucket;
    char       *data;
    Size        datalen PG_USED_FOR_ASSERTS_ONLY;
    bool        new_bmpage = false;

    XLogRecGetBlockTag(record, 0, NULL, NULL, &rightblk);
    XLogRecGetBlockTag(record, 1, NULL, NULL, &leftblk);

    ovflbuf = XLogInitBufferForRedo(record, 0);
    Assert(BufferIsValid(ovflbuf));

    data = XLogRecGetBlockData(record, 0, &datalen);
    num_bucket = (uint32 *) data;
    Assert(datalen == sizeof(uint32));
    _hash_initbuf(ovflbuf, InvalidBlockNumber, *num_bucket, LH_OVERFLOW_PAGE,
                  true);
    /* update backlink */
    ovflpage = BufferGetPage(ovflbuf);
    ovflopaque = HashPageGetOpaque(ovflpage);
    ovflopaque->hasho_prevblkno = leftblk;

    PageSetLSN(ovflpage, lsn);
    MarkBufferDirty(ovflbuf);

    if (XLogReadBufferForRedo(record, 1, &leftbuf) == BLK_NEEDS_REDO)
    {
        Page        leftpage;
        HashPageOpaque leftopaque;

        leftpage = BufferGetPage(leftbuf);
        leftopaque = HashPageGetOpaque(leftpage);
        leftopaque->hasho_nextblkno = rightblk;

        PageSetLSN(leftpage, lsn);
        MarkBufferDirty(leftbuf);
    }

    if (BufferIsValid(leftbuf))
        UnlockReleaseBuffer(leftbuf);
    UnlockReleaseBuffer(ovflbuf);

    /*
     * Note: in normal operation, we'd update the bitmap and meta page while
     * still holding lock on the overflow pages.  But during replay it's not
     * necessary to hold those locks, since no other index updates can be
     * happening concurrently.
     */
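
    /*
     * Block 2, if present, is the bitmap page covering the overflow page
     * just added; setting its bit marks that page as in use.
     */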
    if (XLogRecHasBlockRef(record, 2))
    {
        Buffer      mapbuffer;

        if (XLogReadBufferForRedo(record, 2, &mapbuffer) == BLK_NEEDS_REDO)
        {
            Page        mappage = (Page) BufferGetPage(mapbuffer);
            uint32     *freep = NULL;
            uint32     *bitmap_page_bit;

            freep = HashPageGetBitmap(mappage);

            data = XLogRecGetBlockData(record, 2, &datalen);
            bitmap_page_bit = (uint32 *) data;

            SETBIT(freep, *bitmap_page_bit);

            PageSetLSN(mappage, lsn);
            MarkBufferDirty(mapbuffer);
        }
        if (BufferIsValid(mapbuffer))
            UnlockReleaseBuffer(mapbuffer);
    }

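    /*
     * Block 3, if present, is a brand-new bitmap page, created because the
     * existing bitmap pages had no free bits left.
     */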
    if (XLogRecHasBlockRef(record, 3))
    {
        Buffer      newmapbuf;

        newmapbuf = XLogInitBufferForRedo(record, 3);

        _hash_initbitmapbuffer(newmapbuf, xlrec->bmsize, true);

        new_bmpage = true;
        newmapblk = BufferGetBlockNumber(newmapbuf);

        MarkBufferDirty(newmapbuf);
        PageSetLSN(BufferGetPage(newmapbuf), lsn);

        UnlockReleaseBuffer(newmapbuf);
    }

    if (XLogReadBufferForRedo(record, 4, &metabuf) == BLK_NEEDS_REDO)
    {
        HashMetaPage metap;
        Page        page;
        uint32     *firstfree_ovflpage;

        data = XLogRecGetBlockData(record, 4, &datalen);
        firstfree_ovflpage = (uint32 *) data;

        page = BufferGetPage(metabuf);
        metap = HashPageGetMeta(page);
        metap->hashm_firstfree = *firstfree_ovflpage;

        if (!xlrec->bmpage_found)
        {
            metap->hashm_spares[metap->hashm_ovflpoint]++;

            if (new_bmpage)
            {
                Assert(BlockNumberIsValid(newmapblk));

                metap->hashm_mapp[metap->hashm_nmaps] = newmapblk;
                metap->hashm_nmaps++;
                metap->hashm_spares[metap->hashm_ovflpoint]++;
            }
        }

        PageSetLSN(page, lsn);
        MarkBufferDirty(metabuf);
    }
    if (BufferIsValid(metabuf))
        UnlockReleaseBuffer(metabuf);
}

/*
 * replay allocation of page for split operation
 */
static void
hash_xlog_split_allocate_page(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_split_allocate_page *xlrec = (xl_hash_split_allocate_page *) XLogRecGetData(record);
    Buffer      oldbuf;
    Buffer      newbuf;
    Buffer      metabuf;
    Size        datalen PG_USED_FOR_ASSERTS_ONLY;
    char       *data;
    XLogRedoAction action;

    /*
     * To be consistent with normal operation, here we take cleanup locks on
     * both the old and new buckets even though there can't be any concurrent
     * inserts.
     */

    /* replay the record for old bucket */
    action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &oldbuf);

    /*
     * Note that we still update the page even if it was restored from a full
     * page image, because the special space is not included in the image.
     */
    if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
    {
        Page        oldpage;
        HashPageOpaque oldopaque;

        oldpage = BufferGetPage(oldbuf);
        oldopaque = HashPageGetOpaque(oldpage);

        oldopaque->hasho_flag = xlrec->old_bucket_flag;
        oldopaque->hasho_prevblkno = xlrec->new_bucket;

        PageSetLSN(oldpage, lsn);
        MarkBufferDirty(oldbuf);
    }

    /* replay the record for new bucket */
    XLogReadBufferForRedoExtended(record, 1, RBM_ZERO_AND_CLEANUP_LOCK, true,
                                  &newbuf);
    _hash_initbuf(newbuf, xlrec->new_bucket, xlrec->new_bucket,
                  xlrec->new_bucket_flag, true);
    MarkBufferDirty(newbuf);
    PageSetLSN(BufferGetPage(newbuf), lsn);

    /*
     * We could release the lock on the old bucket earlier as well, but doing
     * it here keeps us consistent with normal operation.
     */
    if (BufferIsValid(oldbuf))
        UnlockReleaseBuffer(oldbuf);
    if (BufferIsValid(newbuf))
        UnlockReleaseBuffer(newbuf);

    /*
     * Note: in normal operation, we'd update the meta page while still
     * holding lock on the old and new bucket pages.  But during replay it's
     * not necessary to hold those locks, since no other bucket splits can be
     * happening concurrently.
     */

    /* replay the record for metapage changes */
    if (XLogReadBufferForRedo(record, 2, &metabuf) == BLK_NEEDS_REDO)
    {
        Page        page;
        HashMetaPage metap;

        page = BufferGetPage(metabuf);
        metap = HashPageGetMeta(page);
        metap->hashm_maxbucket = xlrec->new_bucket;

        data = XLogRecGetBlockData(record, 2, &datalen);

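        /*
         * The block data optionally carries the new (lowmask, highmask) pair
         * and the new (ovflpoint, spares) values; xlrec->flags says which of
         * the two groups is present.
         */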
        if (xlrec->flags & XLH_SPLIT_META_UPDATE_MASKS)
        {
            uint32      lowmask;
            uint32     *highmask;

            /* extract low and high masks. */
            memcpy(&lowmask, data, sizeof(uint32));
            highmask = (uint32 *) ((char *) data + sizeof(uint32));

            /* update metapage */
            metap->hashm_lowmask = lowmask;
            metap->hashm_highmask = *highmask;

            data += sizeof(uint32) * 2;
        }

        if (xlrec->flags & XLH_SPLIT_META_UPDATE_SPLITPOINT)
        {
            uint32      ovflpoint;
            uint32     *ovflpages;

            /* extract information of overflow pages. */
            memcpy(&ovflpoint, data, sizeof(uint32));
            ovflpages = (uint32 *) ((char *) data + sizeof(uint32));

            /* update metapage */
            metap->hashm_spares[ovflpoint] = *ovflpages;
            metap->hashm_ovflpoint = ovflpoint;
        }

        MarkBufferDirty(metabuf);
        PageSetLSN(BufferGetPage(metabuf), lsn);
    }

    if (BufferIsValid(metabuf))
        UnlockReleaseBuffer(metabuf);
}

/*
 * replay of split operation
 */
static void
hash_xlog_split_page(XLogReaderState *record)
{
    Buffer      buf;

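    /*
     * The split record always logs the page as a full-page image, so replay
     * amounts to restoring that image.
     */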
    if (XLogReadBufferForRedo(record, 0, &buf) != BLK_RESTORED)
        elog(ERROR, "Hash split record did not contain a full-page image");

    UnlockReleaseBuffer(buf);
}

/*
 * replay completion of split operation
 */
static void
hash_xlog_split_complete(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_split_complete *xlrec = (xl_hash_split_complete *) XLogRecGetData(record);
    Buffer      oldbuf;
    Buffer      newbuf;
    XLogRedoAction action;

    /* replay the record for old bucket */
    action = XLogReadBufferForRedo(record, 0, &oldbuf);

    /*
     * Note that we still update the page even if it was restored from a full
     * page image, because the bucket flag is not included in the image.
     */
    if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
    {
        Page        oldpage;
        HashPageOpaque oldopaque;

        oldpage = BufferGetPage(oldbuf);
        oldopaque = HashPageGetOpaque(oldpage);

        oldopaque->hasho_flag = xlrec->old_bucket_flag;

        PageSetLSN(oldpage, lsn);
        MarkBufferDirty(oldbuf);
    }
    if (BufferIsValid(oldbuf))
        UnlockReleaseBuffer(oldbuf);

    /* replay the record for new bucket */
    action = XLogReadBufferForRedo(record, 1, &newbuf);

    /*
     * Note that we still update the page even if it was restored from a full
     * page image, because the bucket flag is not included in the image.
     */
    if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
    {
        Page        newpage;
        HashPageOpaque nopaque;

        newpage = BufferGetPage(newbuf);
        nopaque = HashPageGetOpaque(newpage);

        nopaque->hasho_flag = xlrec->new_bucket_flag;

        PageSetLSN(newpage, lsn);
        MarkBufferDirty(newbuf);
    }
    if (BufferIsValid(newbuf))
        UnlockReleaseBuffer(newbuf);
}

/*
 * replay move of page contents for squeeze operation of hash index
 */
static void
hash_xlog_move_page_contents(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_move_page_contents *xldata = (xl_hash_move_page_contents *) XLogRecGetData(record);
    Buffer      bucketbuf = InvalidBuffer;
    Buffer      writebuf = InvalidBuffer;
    Buffer      deletebuf = InvalidBuffer;
    XLogRedoAction action;

    /*
     * Ensure we have a cleanup lock on the primary bucket page before we
     * start the actual replay operation.  This guarantees that no scan can
     * start, and no scan can already be in progress, during the replay of
     * this operation.  If we allowed scans during this operation, they could
     * miss some records or see the same record multiple times.
     */
    if (xldata->is_prim_bucket_same_wrt)
        action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf);
    else
    {
        /*
         * We don't care about the return value; the purpose of reading
         * bucketbuf is just to take a cleanup lock on the primary bucket
         * page.
         */
        (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);

        action = XLogReadBufferForRedo(record, 1, &writebuf);
    }

    /* replay the record for adding entries in overflow buffer */
    if (action == BLK_NEEDS_REDO)
    {
        Page        writepage;
        char       *begin;
        char       *data;
        Size        datalen;
        uint16      ninserted = 0;

        data = begin = XLogRecGetBlockData(record, 1, &datalen);

        writepage = (Page) BufferGetPage(writebuf);

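        /*
         * The block data is an array of xldata->ntups target offset numbers,
         * followed by the index tuples themselves.
         */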
        if (xldata->ntups > 0)
        {
            OffsetNumber *towrite = (OffsetNumber *) data;

            data += sizeof(OffsetNumber) * xldata->ntups;

            while (data - begin < datalen)
            {
                IndexTuple  itup = (IndexTuple) data;
                Size        itemsz;
                OffsetNumber l;

                itemsz = IndexTupleSize(itup);
                itemsz = MAXALIGN(itemsz);

                data += itemsz;

                l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false);
                if (l == InvalidOffsetNumber)
                    elog(ERROR, "hash_xlog_move_page_contents: failed to add item to hash index page, size %d bytes",
                         (int) itemsz);

                ninserted++;
            }
        }

        /*
         * The number of tuples inserted must be the same as requested in the
         * REDO record.
         */
        Assert(ninserted == xldata->ntups);

        PageSetLSN(writepage, lsn);
        MarkBufferDirty(writebuf);
    }

    /* replay the record for deleting entries from overflow buffer */
    if (XLogReadBufferForRedo(record, 2, &deletebuf) == BLK_NEEDS_REDO)
    {
        Page        page;
        char       *ptr;
        Size        len;

        ptr = XLogRecGetBlockData(record, 2, &len);

        page = (Page) BufferGetPage(deletebuf);

        if (len > 0)
        {
            OffsetNumber *unused;
            OffsetNumber *unend;

            unused = (OffsetNumber *) ptr;
            unend = (OffsetNumber *) ((char *) ptr + len);

            if ((unend - unused) > 0)
                PageIndexMultiDelete(page, unused, unend - unused);
        }

        PageSetLSN(page, lsn);
        MarkBufferDirty(deletebuf);
    }

    /*
     * Replay is complete, so now we can release the buffers.  We hold the
     * lock on the primary bucket page until the end of the replay operation.
     * We could release the lock on the write buffer as soon as its part of
     * the operation is complete, when it is not the primary bucket page, but
     * that doesn't seem worth complicating the code.
     */
    if (BufferIsValid(deletebuf))
        UnlockReleaseBuffer(deletebuf);

    if (BufferIsValid(writebuf))
        UnlockReleaseBuffer(writebuf);

    if (BufferIsValid(bucketbuf))
        UnlockReleaseBuffer(bucketbuf);
}

/*
 * replay squeeze page operation of hash index
 */
static void
hash_xlog_squeeze_page(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) XLogRecGetData(record);
    Buffer      bucketbuf = InvalidBuffer;
    Buffer      writebuf;
    Buffer      ovflbuf;
    Buffer      prevbuf = InvalidBuffer;
    Buffer      mapbuf;
    XLogRedoAction action;

    /*
     * Ensure we have a cleanup lock on the primary bucket page before we
     * start the actual replay operation.  This guarantees that no scan can
     * start, and no scan can already be in progress, during the replay of
     * this operation.  If we allowed scans during this operation, they could
     * miss some records or see the same record multiple times.
     */
    if (xldata->is_prim_bucket_same_wrt)
        action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf);
    else
    {
        /*
         * We don't care about the return value; the purpose of reading
         * bucketbuf is just to take a cleanup lock on the primary bucket
         * page.
         */
        (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);

        action = XLogReadBufferForRedo(record, 1, &writebuf);
    }

    /* replay the record for adding entries in overflow buffer */
    if (action == BLK_NEEDS_REDO)
    {
        Page        writepage;
        char       *begin;
        char       *data;
        Size        datalen;
        uint16      ninserted = 0;

        data = begin = XLogRecGetBlockData(record, 1, &datalen);

        writepage = (Page) BufferGetPage(writebuf);

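        /*
         * As above, the block data is an array of xldata->ntups target
         * offset numbers followed by the index tuples themselves.
         */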
        if (xldata->ntups > 0)
        {
            OffsetNumber *towrite = (OffsetNumber *) data;

            data += sizeof(OffsetNumber) * xldata->ntups;

            while (data - begin < datalen)
            {
                IndexTuple  itup = (IndexTuple) data;
                Size        itemsz;
                OffsetNumber l;

                itemsz = IndexTupleSize(itup);
                itemsz = MAXALIGN(itemsz);

                data += itemsz;

                l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false);
                if (l == InvalidOffsetNumber)
                    elog(ERROR, "hash_xlog_squeeze_page: failed to add item to hash index page, size %d bytes",
                         (int) itemsz);

                ninserted++;
            }
        }

        /*
         * The number of tuples inserted must be the same as requested in the
         * REDO record.
         */
        Assert(ninserted == xldata->ntups);

        /*
         * If the page to which we are adding tuples is also the page
         * previous to the freed overflow page, update its nextblkno.
         */
        if (xldata->is_prev_bucket_same_wrt)
        {
            HashPageOpaque writeopaque = HashPageGetOpaque(writepage);

            writeopaque->hasho_nextblkno = xldata->nextblkno;
        }

        PageSetLSN(writepage, lsn);
        MarkBufferDirty(writebuf);
    }

    /* replay the record for initializing overflow buffer */
    if (XLogReadBufferForRedo(record, 2, &ovflbuf) == BLK_NEEDS_REDO)
    {
        Page        ovflpage;
        HashPageOpaque ovflopaque;

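        /*
         * Reset the freed overflow page to the unused state; the cleared
         * bitmap bit (see block 5 below) is what actually marks it as free
         * for reuse.
         */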
        ovflpage = BufferGetPage(ovflbuf);

        _hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf));

        ovflopaque = HashPageGetOpaque(ovflpage);

        ovflopaque->hasho_prevblkno = InvalidBlockNumber;
        ovflopaque->hasho_nextblkno = InvalidBlockNumber;
        ovflopaque->hasho_bucket = InvalidBucket;
        ovflopaque->hasho_flag = LH_UNUSED_PAGE;
        ovflopaque->hasho_page_id = HASHO_PAGE_ID;

        PageSetLSN(ovflpage, lsn);
        MarkBufferDirty(ovflbuf);
    }
    if (BufferIsValid(ovflbuf))
        UnlockReleaseBuffer(ovflbuf);

    /* replay the record for page previous to the freed overflow page */
    if (!xldata->is_prev_bucket_same_wrt &&
        XLogReadBufferForRedo(record, 3, &prevbuf) == BLK_NEEDS_REDO)
    {
        Page        prevpage = BufferGetPage(prevbuf);
        HashPageOpaque prevopaque = HashPageGetOpaque(prevpage);

        prevopaque->hasho_nextblkno = xldata->nextblkno;

        PageSetLSN(prevpage, lsn);
        MarkBufferDirty(prevbuf);
    }
    if (BufferIsValid(prevbuf))
        UnlockReleaseBuffer(prevbuf);

    /* replay the record for page next to the freed overflow page */
    if (XLogRecHasBlockRef(record, 4))
    {
        Buffer      nextbuf;

        if (XLogReadBufferForRedo(record, 4, &nextbuf) == BLK_NEEDS_REDO)
        {
            Page        nextpage = BufferGetPage(nextbuf);
            HashPageOpaque nextopaque = HashPageGetOpaque(nextpage);

            nextopaque->hasho_prevblkno = xldata->prevblkno;

            PageSetLSN(nextpage, lsn);
            MarkBufferDirty(nextbuf);
        }
        if (BufferIsValid(nextbuf))
            UnlockReleaseBuffer(nextbuf);
    }

    if (BufferIsValid(writebuf))
        UnlockReleaseBuffer(writebuf);

    if (BufferIsValid(bucketbuf))
        UnlockReleaseBuffer(bucketbuf);

    /*
     * Note: in normal operation, we'd update the bitmap and meta page while
     * still holding lock on the primary bucket page and overflow pages.  But
     * during replay it's not necessary to hold those locks, since no other
     * index updates can be happening concurrently.
     */
    /* replay the record for bitmap page */
    if (XLogReadBufferForRedo(record, 5, &mapbuf) == BLK_NEEDS_REDO)
    {
        Page        mappage = (Page) BufferGetPage(mapbuf);
        uint32     *freep = NULL;
        char       *data;
        uint32     *bitmap_page_bit;
        Size        datalen;

        freep = HashPageGetBitmap(mappage);

        data = XLogRecGetBlockData(record, 5, &datalen);
        bitmap_page_bit = (uint32 *) data;

        CLRBIT(freep, *bitmap_page_bit);

        PageSetLSN(mappage, lsn);
        MarkBufferDirty(mapbuf);
    }
    if (BufferIsValid(mapbuf))
        UnlockReleaseBuffer(mapbuf);

    /* replay the record for meta page */
    if (XLogRecHasBlockRef(record, 6))
    {
        Buffer      metabuf;

        if (XLogReadBufferForRedo(record, 6, &metabuf) == BLK_NEEDS_REDO)
        {
            HashMetaPage metap;
            Page        page;
            char       *data;
            uint32     *firstfree_ovflpage;
            Size        datalen;

            data = XLogRecGetBlockData(record, 6, &datalen);
            firstfree_ovflpage = (uint32 *) data;

            page = BufferGetPage(metabuf);
            metap = HashPageGetMeta(page);
            metap->hashm_firstfree = *firstfree_ovflpage;

            PageSetLSN(page, lsn);
            MarkBufferDirty(metabuf);
        }
        if (BufferIsValid(metabuf))
            UnlockReleaseBuffer(metabuf);
    }
}

/*
 * replay delete operation of hash index
 */
static void
hash_xlog_delete(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_delete *xldata = (xl_hash_delete *) XLogRecGetData(record);
    Buffer      bucketbuf = InvalidBuffer;
    Buffer      deletebuf;
    Page        page;
    XLogRedoAction action;

    /*
     * Ensure we have a cleanup lock on the primary bucket page before we
     * start the actual replay operation.  This guarantees that no scan can
     * start, and no scan can already be in progress, during the replay of
     * this operation.  If we allowed scans during this operation, they could
     * miss some records or see the same record multiple times.
     */
    if (xldata->is_primary_bucket_page)
        action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &deletebuf);
    else
    {
        /*
         * We don't care about the return value; the purpose of reading
         * bucketbuf is just to take a cleanup lock on the primary bucket
         * page.
         */
        (void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);

        action = XLogReadBufferForRedo(record, 1, &deletebuf);
    }

    /* replay the record for deleting entries in bucket page */
    if (action == BLK_NEEDS_REDO)
    {
        char       *ptr;
        Size        len;

        ptr = XLogRecGetBlockData(record, 1, &len);

        page = (Page) BufferGetPage(deletebuf);

        if (len > 0)
        {
            OffsetNumber *unused;
            OffsetNumber *unend;

            unused = (OffsetNumber *) ptr;
            unend = (OffsetNumber *) ((char *) ptr + len);

            if ((unend - unused) > 0)
                PageIndexMultiDelete(page, unused, unend - unused);
        }

        /*
         * Mark the page as not containing any LP_DEAD items only if the
         * clear_dead_marking flag is set to true.  See comments in
         * hashbucketcleanup() for details.
         */
        if (xldata->clear_dead_marking)
        {
            HashPageOpaque pageopaque;

            pageopaque = HashPageGetOpaque(page);
            pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
        }

        PageSetLSN(page, lsn);
        MarkBufferDirty(deletebuf);
    }
    if (BufferIsValid(deletebuf))
        UnlockReleaseBuffer(deletebuf);

    if (BufferIsValid(bucketbuf))
        UnlockReleaseBuffer(bucketbuf);
}

/*
 * replay split cleanup flag operation for primary bucket page.
 */
static void
hash_xlog_split_cleanup(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    Buffer      buffer;
    Page        page;

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        HashPageOpaque bucket_opaque;

        page = (Page) BufferGetPage(buffer);

        bucket_opaque = HashPageGetOpaque(page);
        bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

/*
 * replay for update meta page
 */
static void
hash_xlog_update_meta_page(XLogReaderState *record)
{
    HashMetaPage metap;
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_update_meta_page *xldata = (xl_hash_update_meta_page *) XLogRecGetData(record);
    Buffer      metabuf;
    Page        page;

    if (XLogReadBufferForRedo(record, 0, &metabuf) == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(metabuf);
        metap = HashPageGetMeta(page);

        metap->hashm_ntuples = xldata->ntuples;

        PageSetLSN(page, lsn);
        MarkBufferDirty(metabuf);
    }
    if (BufferIsValid(metabuf))
        UnlockReleaseBuffer(metabuf);
}

/*
 * replay delete operation in hash index to remove
 * tuples marked as DEAD during index tuple insertion.
 */
static void
hash_xlog_vacuum_one_page(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_hash_vacuum_one_page *xldata;
    Buffer      buffer;
    Buffer      metabuf;
    Page        page;
    XLogRedoAction action;
    HashPageOpaque pageopaque;
    OffsetNumber *toDelete;

    xldata = (xl_hash_vacuum_one_page *) XLogRecGetData(record);
    toDelete = xldata->offsets;

    /*
     * If we have any conflict processing to do, it must happen before we
     * update the page.
     *
     * Hash index entries that are marked LP_DEAD and are being removed
     * during hash index tuple insertion can conflict with standby queries.
     * You might think that vacuum records would conflict as well, but we've
     * handled that already.  XLOG_HEAP2_PRUNE records provide the highest
     * xid cleaned by the vacuum of the heap, and so we can resolve any
     * conflicts just once when that arrives.  After that we know that no
     * conflicts exist from individual hash index vacuum records on that
     * index.
     */
    if (InHotStandby)
    {
        RelFileLocator rlocator;

        XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
        ResolveRecoveryConflictWithSnapshot(xldata->snapshotConflictHorizon,
                                            xldata->isCatalogRel,
                                            rlocator);
    }

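    /*
     * With any conflicts resolved, apply the deletions while holding a
     * cleanup lock on the bucket page.
     */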
    action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer);

    if (action == BLK_NEEDS_REDO)
    {
        page = (Page) BufferGetPage(buffer);

        PageIndexMultiDelete(page, toDelete, xldata->ntuples);

        /*
         * Mark the page as not containing any LP_DEAD items.  See comments
         * in _hash_vacuum_one_page() for details.
         */
        pageopaque = HashPageGetOpaque(page);
        pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
    {
        Page        metapage;
        HashMetaPage metap;

        metapage = BufferGetPage(metabuf);
        metap = HashPageGetMeta(metapage);

        metap->hashm_ntuples -= xldata->ntuples;

        PageSetLSN(metapage, lsn);
        MarkBufferDirty(metabuf);
    }
    if (BufferIsValid(metabuf))
        UnlockReleaseBuffer(metabuf);
}

void
hash_redo(XLogReaderState *record)
{
    uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;

    switch (info)
    {
        case XLOG_HASH_INIT_META_PAGE:
            hash_xlog_init_meta_page(record);
            break;
        case XLOG_HASH_INIT_BITMAP_PAGE:
            hash_xlog_init_bitmap_page(record);
            break;
        case XLOG_HASH_INSERT:
            hash_xlog_insert(record);
            break;
        case XLOG_HASH_ADD_OVFL_PAGE:
            hash_xlog_add_ovfl_page(record);
            break;
        case XLOG_HASH_SPLIT_ALLOCATE_PAGE:
            hash_xlog_split_allocate_page(record);
            break;
        case XLOG_HASH_SPLIT_PAGE:
            hash_xlog_split_page(record);
            break;
        case XLOG_HASH_SPLIT_COMPLETE:
            hash_xlog_split_complete(record);
            break;
        case XLOG_HASH_MOVE_PAGE_CONTENTS:
            hash_xlog_move_page_contents(record);
            break;
        case XLOG_HASH_SQUEEZE_PAGE:
            hash_xlog_squeeze_page(record);
            break;
        case XLOG_HASH_DELETE:
            hash_xlog_delete(record);
            break;
        case XLOG_HASH_SPLIT_CLEANUP:
            hash_xlog_split_cleanup(record);
            break;
        case XLOG_HASH_UPDATE_META_PAGE:
            hash_xlog_update_meta_page(record);
            break;
        case XLOG_HASH_VACUUM_ONE_PAGE:
            hash_xlog_vacuum_one_page(record);
            break;
        default:
            elog(PANIC, "hash_redo: unknown op code %u", info);
    }
}

/*
 * Mask a hash page before performing consistency checks on it.
 */
void
hash_mask(char *pagedata, BlockNumber blkno)
{
    Page        page = (Page) pagedata;
    HashPageOpaque opaque;
    int         pagetype;

    mask_page_lsn_and_checksum(page);

    mask_page_hint_bits(page);
    mask_unused_space(page);

    opaque = HashPageGetOpaque(page);

    pagetype = opaque->hasho_flag & LH_PAGE_TYPE;
    if (pagetype == LH_UNUSED_PAGE)
    {
        /*
         * Mask everything on a UNUSED page.
         */
        mask_page_content(page);
    }
    else if (pagetype == LH_BUCKET_PAGE ||
             pagetype == LH_OVERFLOW_PAGE)
    {
        /*
         * In hash bucket and overflow pages, it is possible to modify the
         * LP_FLAGS without emitting any WAL record.  Hence, mask the line
         * pointer flags.  See hashgettuple(), _hash_kill_items() for
         * details.
         */
        mask_lp_flags(page);
    }

    /*
     * It is possible that the hint bit LH_PAGE_HAS_DEAD_TUPLES may remain
     * unlogged.  So, mask it.  See _hash_kill_items() for details.
     */
    opaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
}
|