/*-------------------------------------------------------------------------
 *
 * visibilitymap.c
 *      bitmap for tracking visibility of heap tuples
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *      src/backend/access/heap/visibilitymap.c
 *
 * INTERFACE ROUTINES
 *      visibilitymap_clear  - clear bits for one page in the visibility map
 *      visibilitymap_pin    - pin a map page for setting a bit
 *      visibilitymap_pin_ok - check whether correct map page is already pinned
 *      visibilitymap_set    - set a bit in a previously pinned page
 *      visibilitymap_get_status - get status of bits
 *      visibilitymap_count  - count number of bits set in visibility map
 *      visibilitymap_prepare_truncate -
 *              prepare for truncation of the visibility map
 *
 * NOTES
 *
 * The visibility map is a bitmap with two bits (all-visible and all-frozen)
 * per heap page. A set all-visible bit means that all tuples on the page are
 * known visible to all transactions, and therefore the page doesn't need to
 * be vacuumed. A set all-frozen bit means that all tuples on the page are
 * completely frozen, and therefore the page doesn't need to be vacuumed even
 * if a whole-table scanning vacuum is required (e.g. anti-wraparound vacuum).
 * The all-frozen bit must be set only when the page is already all-visible.
 *
 * The map is conservative in the sense that we make sure that whenever a bit
 * is set, we know the condition is true, but if a bit is not set, it might or
 * might not be true.
 *
 * Clearing visibility map bits is not separately WAL-logged. The callers
 * must make sure that whenever a bit is cleared, the bit is cleared on WAL
 * replay of the updating operation as well.
 *
 * When we *set* a visibility map bit during VACUUM, we must write WAL. This
 * may seem counterintuitive, since the bit is basically a hint: if it is
 * clear, it may still be the case that every tuple on the page is visible to
 * all transactions; we just don't know that for certain. The difficulty is
 * that there are two bits which are typically set together: the
 * PD_ALL_VISIBLE bit on the page itself, and the visibility map bit. If a
 * crash occurs after the visibility map page makes it to disk and before the
 * updated heap page makes it to disk, redo must set the bit on the heap
 * page. Otherwise, the next insert, update, or delete on the heap page will
 * fail to realize that the visibility map bit must be cleared, possibly
 * causing index-only scans to return wrong answers.
 *
 * VACUUM will normally skip pages for which the visibility map bit is set;
 * such pages can't contain any dead tuples and therefore don't need
 * vacuuming.
 *
 * LOCKING
 *
 * In heapam.c, whenever a page is modified so that not all tuples on the
 * page are visible to everyone anymore, the corresponding bit in the
 * visibility map is cleared. In order to be crash-safe, we need to do this
 * while still holding a lock on the heap page and in the same critical
 * section that logs the page modification. However, we don't want to hold
 * the buffer lock over any I/O that may be required to read in the visibility
 * map page. To avoid this, we examine the heap page before locking it;
 * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
 * page. Then, we lock the buffer. But this creates a race condition: there
 * is a possibility that in the time it takes to lock the buffer, the
 * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
 * buffer, pin the visibility map page, and relock the buffer. This shouldn't
 * happen often, because only VACUUM currently sets visibility map bits,
 * and the race will only occur if VACUUM processes a given page at almost
 * exactly the same time that someone tries to further modify it. (A sketch
 * of this retry protocol appears just after this comment.)
 *
 * To set a bit, you need to hold a lock on the heap page. That prevents
 * the race condition where VACUUM sees that all tuples on the page are
 * visible to everyone, but another backend modifies the page before VACUUM
 * sets the bit in the visibility map.
 *
 * When a bit is set, the LSN of the visibility map page is updated to make
 * sure that the visibility map update doesn't get written to disk before the
 * WAL record of the changes that made it possible to set the bit is flushed.
 * But when a bit is cleared, we don't have to do that because it's always
 * safe to clear a bit in the map from a correctness point of view.
 *
 *-------------------------------------------------------------------------
 */
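
/*
 * Illustrative sketch of the pin-before-lock retry protocol described under
 * LOCKING above.  This is hypothetical caller code, not part of this file;
 * it shows the shape of what a heap-modifying caller does:
 *
 *      vmbuffer = InvalidBuffer;
 *      if (PageIsAllVisible(BufferGetPage(buffer)))
 *          visibilitymap_pin(relation, block, &vmbuffer);
 *
 *      LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *
 *      (recheck: the bit may have been set while we weren't looking)
 *      if (PageIsAllVisible(BufferGetPage(buffer)) &&
 *          !visibilitymap_pin_ok(block, vmbuffer))
 *      {
 *          LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 *          visibilitymap_pin(relation, block, &vmbuffer);
 *          LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *      }
 */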
#include "postgres.h"

#include "access/heapam_xlog.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/smgr.h"
#include "utils/inval.h"
#include "utils/rel.h"


/*#define TRACE_VISIBILITYMAP */

/*
 * Size of the bitmap on each visibility map page, in bytes. There are no
 * extra headers, so the whole page minus the standard page header is
 * used for the bitmap.
 */
#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

/* Number of heap blocks we can represent in one byte */
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

/* Number of heap blocks we can represent in one visibility map page. */
#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)

/* Mapping from heap block number to the right bit in the visibility map */
#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)
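
/*
 * Worked example of the mapping macros (illustrative only; the numbers
 * assume the default BLCKSZ of 8192, giving MAPSIZE = 8168 and
 * HEAPBLOCKS_PER_PAGE = 32672).  For heapBlk = 100000:
 *
 *      HEAPBLK_TO_MAPBLOCK(100000) = 100000 / 32672       = 3
 *      HEAPBLK_TO_MAPBYTE(100000)  = (100000 % 32672) / 4 = 496
 *      HEAPBLK_TO_OFFSET(100000)   = (100000 % 4) * 2     = 0
 *
 * i.e. the block's bit pair lives in bits 0-1 of byte 496 on map page 3.
 */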

/* Masks for counting subsets of bits in the visibility map. */
#define VISIBLE_MASK8 (0x55)    /* The lower bit of each bit pair */
#define FROZEN_MASK8 (0xaa)     /* The upper bit of each bit pair */
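
/*
 * Worked example of the masks (illustrative only).  A map byte of 0x0d =
 * 00001101 describes four heap pages: page 0 is all-visible, page 1 is
 * all-visible and all-frozen, pages 2 and 3 have no bits set.  Then
 *
 *      popcount(0x0d & VISIBLE_MASK8) = popcount(00000101) = 2
 *      popcount(0x0d & FROZEN_MASK8)  = popcount(00001000) = 1
 *
 * which is how visibilitymap_count() tallies both counts a whole page of
 * map bytes at a time.
 */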

/* prototypes for internal routines */
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);


/*
 * visibilitymap_clear - clear specified bits for one page in visibility map
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O. Returns true if any bits have been cleared and false otherwise.
 */
bool
visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    int         mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    int         mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    uint8       mask = flags << mapOffset;
    char       *map;
    bool        cleared = false;

    /* Must never clear all_visible bit while leaving all_frozen bit set */
    Assert(flags & VISIBILITYMAP_VALID_BITS);
    Assert(flags != VISIBILITYMAP_ALL_VISIBLE);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
        elog(ERROR, "wrong buffer passed to visibilitymap_clear");

    LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
    map = PageGetContents(BufferGetPage(vmbuf));

    if (map[mapByte] & mask)
    {
        map[mapByte] &= ~mask;

        MarkBufferDirty(vmbuf);
        cleared = true;
    }

    LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);

    return cleared;
}
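
/*
 * Illustrative caller sketch (hypothetical, not part of this file): a
 * heap-modifying operation pins the map page before locking the heap page,
 * then clears the bits inside the same critical section that WAL-logs the
 * heap change, as the NOTES section requires:
 *
 *      vmbuffer = InvalidBuffer;
 *      visibilitymap_pin(relation, block, &vmbuffer);
 *      LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *      START_CRIT_SECTION();
 *      ... modify the heap page ...
 *      if (PageIsAllVisible(page))
 *      {
 *          PageClearAllVisible(page);
 *          visibilitymap_clear(relation, block, vmbuffer,
 *                              VISIBILITYMAP_VALID_BITS);
 *      }
 *      ... WAL-log the change, noting that the bit was cleared ...
 *      END_CRIT_SECTION();
 */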

/*
 * visibilitymap_pin - pin a map page for setting a bit
 *
 * Setting a bit in the visibility map is a two-phase operation. First, call
 * visibilitymap_pin, to pin the visibility map page containing the bit for
 * the heap page. Because that can require I/O to read the map page, you
 * shouldn't hold a lock on the heap page while doing that. Then, call
 * visibilitymap_set to actually set the bit.
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk.
 *
 * If the page doesn't exist in the map file yet, it is extended.
 */
void
visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*vmbuf))
    {
        if (BufferGetBlockNumber(*vmbuf) == mapBlock)
            return;

        ReleaseBuffer(*vmbuf);
    }
    *vmbuf = vm_readbuf(rel, mapBlock, true);
}

/*
 * visibilitymap_pin_ok - do we already have the correct page pinned?
 *
 * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. The return value indicates whether the buffer covers the
 * given heapBlk.
 */
bool
visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
}

221 : :
222 : : /*
223 : : * visibilitymap_set - set bit(s) on a previously pinned page
224 : : *
225 : : * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
226 : : * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the
227 : : * one provided; in normal running, we generate a new XLOG record and set the
228 : : * page LSN to that value (though the heap page's LSN may *not* be updated;
229 : : * see below). cutoff_xid is the largest xmin on the page being marked
230 : : * all-visible; it is needed for Hot Standby, and can be InvalidTransactionId
231 : : * if the page contains no tuples. It can also be set to InvalidTransactionId
232 : : * when a page that is already all-visible is being marked all-frozen.
233 : : *
234 : : * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
235 : : * this function. Except in recovery, caller should also pass the heap
236 : : * buffer. When checksums are enabled and we're not in recovery, we must add
237 : : * the heap buffer to the WAL chain to protect it from being torn.
238 : : *
239 : : * You must pass a buffer containing the correct map page to this function.
240 : : * Call visibilitymap_pin first to pin the right one. This function doesn't do
241 : : * any I/O.
242 : : */
243 : : void
4041 simon@2ndQuadrant.co 244 : 49012 : visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
245 : : XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
246 : : uint8 flags)
247 : : {
5611 heikki.linnakangas@i 248 : 49012 : BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
249 : 49012 : uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
2872 rhaas@postgresql.org 250 : 49012 : uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
251 : : Page page;
252 : : uint8 *map;
253 : :
254 : : #ifdef TRACE_VISIBILITYMAP
255 : : elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
256 : : #endif
257 : :
4681 258 [ + + - + ]: 49012 : Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
454 pg@bowt.ie 259 [ + + - + ]: 49012 : Assert(InRecovery || PageIsAllVisible((Page) BufferGetPage(heapBuf)));
379 andres@anarazel.de 260 [ - + ]: 49012 : Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);
261 : :
262 : : /* Must never set all_frozen bit without also setting all_visible bit */
454 pg@bowt.ie 263 [ - + ]: 49012 : Assert(flags != VISIBILITYMAP_ALL_FROZEN);
264 : :
265 : : /* Check that we have the right heap page pinned, if present */
4041 simon@2ndQuadrant.co 266 [ + + - + ]: 49012 : if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
4041 simon@2ndQuadrant.co 267 [ # # ]:UBC 0 : elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
268 : :
269 : : /* Check that we have the right VM page pinned */
4041 simon@2ndQuadrant.co 270 [ + - - + ]:CBC 49012 : if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
4041 simon@2ndQuadrant.co 271 [ # # ]:UBC 0 : elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
272 : :
2916 kgrittn@postgresql.o 273 :CBC 49012 : page = BufferGetPage(vmBuf);
2866 rhaas@postgresql.org 274 : 49012 : map = (uint8 *) PageGetContents(page);
4041 simon@2ndQuadrant.co 275 : 49012 : LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
276 : :
2872 rhaas@postgresql.org 277 [ + - ]: 49012 : if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))
278 : : {
4681 279 : 49012 : START_CRIT_SECTION();
280 : :
2872 281 : 49012 : map[mapByte] |= (flags << mapOffset);
4041 simon@2ndQuadrant.co 282 : 49012 : MarkBufferDirty(vmBuf);
283 : :
4681 rhaas@postgresql.org 284 [ + + + + : 49012 : if (RelationNeedsWAL(rel))
+ - + + ]
285 : : {
286 [ + + ]: 46606 : if (XLogRecPtrIsInvalid(recptr))
287 : : {
4041 simon@2ndQuadrant.co 288 [ - + ]: 39281 : Assert(!InRecovery);
379 andres@anarazel.de 289 : 39281 : recptr = log_heap_visible(rel, heapBuf, vmBuf, cutoff_xid, flags);
290 : :
291 : : /*
292 : : * If data checksums are enabled (or wal_log_hints=on), we
293 : : * need to protect the heap page from being torn.
294 : : *
295 : : * If not, then we must *not* update the heap page's LSN. In
296 : : * this case, the FPI for the heap page was omitted from the
297 : : * WAL record inserted above, so it would be incorrect to
298 : : * update the heap page's LSN.
299 : : */
3775 heikki.linnakangas@i 300 [ + + + + ]: 39281 : if (XLogHintBitIsNeeded())
301 : : {
2916 kgrittn@postgresql.o 302 : 8147 : Page heapPage = BufferGetPage(heapBuf);
303 : :
4041 simon@2ndQuadrant.co 304 : 8147 : PageSetLSN(heapPage, recptr);
305 : : }
306 : : }
5611 heikki.linnakangas@i 307 : 46606 : PageSetLSN(page, recptr);
308 : : }
309 : :
4681 rhaas@postgresql.org 310 [ - + ]: 49012 : END_CRIT_SECTION();
311 : : }
312 : :
4041 simon@2ndQuadrant.co 313 : 49012 : LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
5611 heikki.linnakangas@i 314 : 49012 : }
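
/*
 * Illustrative two-phase usage sketch (hypothetical, not part of this
 * file), roughly the shape of a VACUUM-style caller: pin the map page
 * before taking the heap-page lock, set PD_ALL_VISIBLE, then set the map
 * bit:
 *
 *      vmbuffer = InvalidBuffer;
 *      visibilitymap_pin(rel, blkno, &vmbuffer);     (may do I/O)
 *      LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 *      if (... all tuples on the page are visible to everyone ...)
 *      {
 *          PageSetAllVisible(page);
 *          MarkBufferDirty(buf);
 *          visibilitymap_set(rel, blkno, buf, InvalidXLogRecPtr,
 *                            vmbuffer, visibility_cutoff_xid,
 *                            VISIBILITYMAP_ALL_VISIBLE);
 *      }
 *      UnlockReleaseBuffer(buf);
 *      ReleaseBuffer(vmbuffer);
 */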

/*
 * visibilitymap_get_status - get status of bits
 *
 * Are all tuples on heapBlk visible to all transactions, or all frozen,
 * according to the visibility map?
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an
 * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
 * releasing *vmbuf after it's done testing and setting bits.
 *
 * NOTE: This function is typically called without a lock on the heap page,
 * so somebody else could change the bit just after we look at it. In fact,
 * since we don't lock the visibility map page either, it's even possible that
 * someone else could have changed the bit just before we look at it, but yet
 * we might see the old value. It is the caller's responsibility to deal with
 * all concurrency issues!
 */
uint8
visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    char       *map;
    uint8       result;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*vmbuf))
    {
        if (BufferGetBlockNumber(*vmbuf) != mapBlock)
        {
            ReleaseBuffer(*vmbuf);
            *vmbuf = InvalidBuffer;
        }
    }

    if (!BufferIsValid(*vmbuf))
    {
        *vmbuf = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(*vmbuf))
            return false;
    }

    map = PageGetContents(BufferGetPage(*vmbuf));

    /*
     * A single byte read is atomic. There could be memory-ordering effects
     * here, but for performance reasons we make it the caller's job to worry
     * about that.
     */
    result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
    return result;
}
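
/*
 * Illustrative usage sketch (hypothetical, not part of this file): an
 * index-only-scan-style caller tests page visibility, reusing the pinned
 * map buffer across calls and releasing it when done:
 *
 *      Buffer      vmbuffer = InvalidBuffer;
 *
 *      if (visibilitymap_get_status(rel, blkno, &vmbuffer) &
 *          VISIBILITYMAP_ALL_VISIBLE)
 *      {
 *          ... every tuple on the page is visible; skip the heap fetch ...
 *      }
 *      ...
 *      if (BufferIsValid(vmbuffer))
 *          ReleaseBuffer(vmbuffer);
 */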

/*
 * visibilitymap_count - count number of bits set in visibility map
 *
 * Note: we ignore the possibility of race conditions when the table is being
 * extended concurrently with the call. New pages added to the table aren't
 * going to be marked all-visible or all-frozen, so they won't affect the
 * result.
 */
void
visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
{
    BlockNumber mapBlock;
    BlockNumber nvisible = 0;
    BlockNumber nfrozen = 0;

    /* all_visible must be specified */
    Assert(all_visible);

    for (mapBlock = 0;; mapBlock++)
    {
        Buffer      mapBuffer;
        uint64     *map;

        /*
         * Read till we fall off the end of the map. We assume that any extra
         * bytes in the last page are zeroed, so we don't bother excluding
         * them from the count.
         */
        mapBuffer = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(mapBuffer))
            break;

        /*
         * We choose not to lock the page, since the result is going to be
         * immediately stale anyway if anyone is concurrently setting or
         * clearing bits, and we only really need an approximate value.
         */
        map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));

        nvisible += pg_popcount_masked((const char *) map, MAPSIZE, VISIBLE_MASK8);
        if (all_frozen)
            nfrozen += pg_popcount_masked((const char *) map, MAPSIZE, FROZEN_MASK8);

        ReleaseBuffer(mapBuffer);
    }

    *all_visible = nvisible;
    if (all_frozen)
        *all_frozen = nfrozen;
}

/*
 * visibilitymap_prepare_truncate -
 *          prepare for truncation of the visibility map
 *
 * nheapblocks is the new size of the heap.
 *
 * Return the number of blocks of the new visibility map.
 * If it's InvalidBlockNumber, there is nothing to truncate;
 * otherwise the caller is responsible for calling smgrtruncate()
 * to truncate the visibility map pages.
 */
BlockNumber
visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
{
    BlockNumber newnblocks;

    /* last remaining block, byte, and bit */
    BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
    uint32      truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
    uint8       truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
#endif

    /*
     * If no visibility map has been created yet for this relation, there's
     * nothing to truncate.
     */
    if (!smgrexists(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM))
        return InvalidBlockNumber;

    /*
     * Unless the new size is exactly at a visibility map page boundary, the
     * tail bits in the last remaining map page, representing truncated heap
     * blocks, need to be cleared. This is not only tidy, but also necessary
     * because we don't get a chance to clear the bits if the heap is
     * extended again.
     */
    if (truncByte != 0 || truncOffset != 0)
    {
        Buffer      mapBuffer;
        Page        page;
        char       *map;

        newnblocks = truncBlock + 1;

        mapBuffer = vm_readbuf(rel, truncBlock, false);
        if (!BufferIsValid(mapBuffer))
        {
            /* nothing to do, the file was already smaller */
            return InvalidBlockNumber;
        }

        page = BufferGetPage(mapBuffer);
        map = PageGetContents(page);

        LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);

        /* NO EREPORT(ERROR) from here till changes are logged */
        START_CRIT_SECTION();

        /* Clear out the unwanted bytes. */
        MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));

        /*----
         * Mask out the unwanted bits of the last remaining byte.
         *
         * ((1 << 0) - 1) = 00000000
         * ((1 << 1) - 1) = 00000001
         * ...
         * ((1 << 6) - 1) = 00111111
         * ((1 << 7) - 1) = 01111111
         *----
         */
        map[truncByte] &= (1 << truncOffset) - 1;

        /*
         * Truncation of a relation is WAL-logged at a higher level, and we
         * will be called during WAL replay. But if checksums are enabled, we
         * still need to write a WAL record to protect against a torn page,
         * in case the page is flushed to disk before the truncation WAL
         * record. We cannot use MarkBufferDirtyHint here, because that will
         * not dirty the page during recovery.
         */
        MarkBufferDirty(mapBuffer);
        if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded())
            log_newpage_buffer(mapBuffer, false);

        END_CRIT_SECTION();

        UnlockReleaseBuffer(mapBuffer);
    }
    else
        newnblocks = truncBlock;

    if (smgrnblocks(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM) <= newnblocks)
    {
        /* nothing to do, the file was already smaller than requested size */
        return InvalidBlockNumber;
    }

    return newnblocks;
}
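
/*
 * Worked truncation example (illustrative only; assumes the default BLCKSZ
 * of 8192, so HEAPBLOCKS_PER_PAGE = 32672).  Truncating the heap to
 * nheapblocks = 32673 gives truncBlock = 1, truncByte = 0, truncOffset = 2:
 * the map keeps two pages (newnblocks = 2), every byte after byte 0 of map
 * page 1 is zeroed, and byte 0 is masked with (1 << 2) - 1 = 00000011,
 * preserving only the bit pair of the sole surviving heap block on that
 * page (block 32672).
 */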

/*
 * Read a visibility map page.
 *
 * If the page doesn't exist, InvalidBuffer is returned unless 'extend' is
 * true, in which case the visibility map file is extended.
 */
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
    Buffer      buf;
    SMgrRelation reln;

    /*
     * Caution: re-using this smgr pointer could fail if the relcache entry
     * gets closed. It's safe as long as we only do smgr-level operations
     * between here and the last use of the pointer.
     */
    reln = RelationGetSmgr(rel);

    /*
     * If we haven't cached the size of the visibility map fork yet, check it
     * first.
     */
    if (reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
    {
        if (smgrexists(reln, VISIBILITYMAP_FORKNUM))
            smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
        else
            reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
    }

    /*
     * For reading we use ZERO_ON_ERROR mode, and initialize the page if
     * necessary. It's always safe to clear bits, so it's better to clear
     * corrupt pages than error out.
     *
     * We use the same path below to initialize pages when extending the
     * relation, as a concurrent extension can end up with vm_extend()
     * returning an already-initialized page.
     */
    if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
    {
        if (extend)
            buf = vm_extend(rel, blkno + 1);
        else
            return InvalidBuffer;
    }
    else
        buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
                                 RBM_ZERO_ON_ERROR, NULL);

    /*
     * Initializing the page when needed is trickier than it looks, because of
     * the possibility of multiple backends doing this concurrently, and our
     * desire to not uselessly take the buffer lock in the normal path where
     * the page is OK. We must take the lock to initialize the page, so
     * recheck page newness after we have the lock, in case someone else
     * already did it. Also, because we initially check PageIsNew with no
     * lock, it's possible to fall through and return the buffer while someone
     * else is still initializing the page (i.e., we might see pd_upper as set
     * but other page header fields are still zeroes). This is harmless for
     * callers that will take a buffer lock themselves, but some callers
     * inspect the page without any lock at all. The latter is OK only so
     * long as it doesn't depend on the page header having correct contents.
     * Current usage is safe because PageGetContents() does not require that.
     */
    if (PageIsNew(BufferGetPage(buf)))
    {
        LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
        if (PageIsNew(BufferGetPage(buf)))
            PageInit(BufferGetPage(buf), BLCKSZ, 0);
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    }
    return buf;
}

/*
 * Ensure that the visibility map fork is at least vm_nblocks long, extending
 * it if necessary with zeroed pages.
 */
static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
    Buffer      buf;

    buf = ExtendBufferedRelTo(BMR_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
                              EB_CREATE_FORK_IF_NEEDED |
                              EB_CLEAR_SIZE_CACHE,
                              vm_nblocks,
                              RBM_ZERO_ON_ERROR);

    /*
     * Send a shared-inval message to force other backends to close any smgr
     * references they may have for this rel, which we are about to change.
     * This is a useful optimization because it means that backends don't
     * have to keep checking for creation or extension of the file, which
     * happens infrequently.
     */
    CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);

    return buf;
}