/*-------------------------------------------------------------------------
 *
 * visibilitymap.c
 *      bitmap for tracking visibility of heap tuples
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *      src/backend/access/heap/visibilitymap.c
 *
 * INTERFACE ROUTINES
 *      visibilitymap_clear  - clear bits for one page in the visibility map
 *      visibilitymap_pin    - pin a map page for setting a bit
 *      visibilitymap_pin_ok - check whether correct map page is already pinned
 *      visibilitymap_set    - set a bit in a previously pinned page
 *      visibilitymap_get_status - get status of bits
 *      visibilitymap_count  - count number of bits set in visibility map
 *      visibilitymap_prepare_truncate -
 *          prepare for truncation of the visibility map
 *
 * NOTES
 *
 * The visibility map is a bitmap with two bits (all-visible and all-frozen)
 * per heap page. A set all-visible bit means that all tuples on the page are
 * known visible to all transactions, and therefore the page doesn't need to
 * be vacuumed. A set all-frozen bit means that all tuples on the page are
 * completely frozen, and therefore the page doesn't need to be vacuumed even
 * if a whole-table scanning vacuum is required (e.g., an anti-wraparound
 * vacuum). The all-frozen bit must be set only when the page is already
 * all-visible.
 *
 * The map is conservative in the sense that we make sure that whenever a bit
 * is set, we know the condition is true, but if a bit is not set, it might or
 * might not be true.
 *
 * Clearing visibility map bits is not separately WAL-logged. The callers
 * must make sure that whenever a bit is cleared, the bit is cleared on WAL
 * replay of the updating operation as well.
 *
 * When we *set* a visibility map bit during VACUUM, we must write WAL. This
 * may seem counterintuitive, since the bit is basically a hint: if it is
 * clear, it may still be the case that every tuple on the page is visible to
 * all transactions; we just don't know that for certain. The difficulty is
 * that there are two bits which are typically set together: the
 * PD_ALL_VISIBLE bit on the page itself, and the visibility map bit. If a
 * crash occurs after the visibility map page makes it to disk and before the
 * updated heap page makes it to disk, redo must set the bit on the heap page.
 * Otherwise, the next insert, update, or delete on the heap page will fail to
 * realize that the visibility map bit must be cleared, possibly causing
 * index-only scans to return wrong answers.
 *
 * VACUUM will normally skip pages for which the visibility map bit is set;
 * such pages can't contain any dead tuples and therefore don't need
 * vacuuming.
 *
 * LOCKING
 *
 * In heapam.c, whenever a page is modified so that not all tuples on the
 * page are visible to everyone anymore, the corresponding bit in the
 * visibility map is cleared. In order to be crash-safe, we need to do this
 * while still holding a lock on the heap page and in the same critical
 * section that logs the page modification. However, we don't want to hold
 * the buffer lock over any I/O that may be required to read in the visibility
 * map page. To avoid this, we examine the heap page before locking it;
 * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
 * page. Then, we lock the buffer. But this creates a race condition: there
 * is a possibility that in the time it takes to lock the buffer, the
 * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
 * buffer, pin the visibility map page, and relock the buffer. This shouldn't
 * happen often, because only VACUUM currently sets visibility map bits,
 * and the race will only occur if VACUUM processes a given page at almost
 * exactly the same time that someone tries to further modify it.
 *
 * To set a bit, you need to hold a lock on the heap page. That prevents
 * the race condition where VACUUM sees that all tuples on the page are
 * visible to everyone, but another backend modifies the page before VACUUM
 * sets the bit in the visibility map.
 *
 * When a bit is set, the LSN of the visibility map page is updated to make
 * sure that the visibility map update doesn't get written to disk before the
 * WAL record of the changes that made it possible to set the bit is flushed.
 * But when a bit is cleared, we don't have to do that because it's always
 * safe to clear a bit in the map from a correctness point of view.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam_xlog.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/inval.h"


/*#define TRACE_VISIBILITYMAP */

/*
 * Size of the bitmap on each visibility map page, in bytes. There are no
 * extra headers, so the whole page minus the standard page header is
 * used for the bitmap.
 */
#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

/* Number of heap blocks we can represent in one byte */
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

/* Number of heap blocks we can represent in one visibility map page. */
#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)

/* Mapping from heap block number to the right bit in the visibility map */
#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)
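
/*
 * Worked example of the mapping macros (illustrative; the numbers assume the
 * default 8 kB BLCKSZ and the 24-byte standard page header): MAPSIZE is
 * 8192 - 24 = 8168 bytes, HEAPBLOCKS_PER_BYTE is 8 / 2 = 4, and one map page
 * covers HEAPBLOCKS_PER_PAGE = 8168 * 4 = 32672 heap blocks.  Heap block
 * 40000 therefore lands on map block 40000 / 32672 = 1, map byte
 * (40000 % 32672) / 4 = 1832, and bit offset (40000 % 4) * 2 = 0.
 */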

/* Masks for counting subsets of bits in the visibility map. */
#define VISIBLE_MASK64  UINT64CONST(0x5555555555555555) /* The lower bit of
                                                         * each bit pair */
#define FROZEN_MASK64   UINT64CONST(0xaaaaaaaaaaaaaaaa) /* The upper bit of
                                                         * each bit pair */
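
/*
 * Example (illustrative): a map byte of 0x4D (binary 01001101) describes
 * four heap blocks: blocks 0, 1, and 3 are all-visible, and block 1 is
 * additionally all-frozen.  ANDing a word with VISIBLE_MASK64 keeps only the
 * all-visible bits, and ANDing with FROZEN_MASK64 keeps only the all-frozen
 * bits, so pg_popcount64() can count each subset directly, as
 * visibilitymap_count() does below.
 */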

/* prototypes for internal routines */
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);


/*
 * visibilitymap_clear - clear specified bits for one page in visibility map
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O. Returns true if any bits have been cleared and false otherwise.
 */
bool
visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    int         mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    int         mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    uint8       mask = flags << mapOffset;
    char       *map;
    bool        cleared = false;

    /* Must never clear all_visible bit while leaving all_frozen bit set */
    Assert(flags & VISIBILITYMAP_VALID_BITS);
    Assert(flags != VISIBILITYMAP_ALL_VISIBLE);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
        elog(ERROR, "wrong buffer passed to visibilitymap_clear");

    LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
    map = PageGetContents(BufferGetPage(vmbuf));

    if (map[mapByte] & mask)
    {
        map[mapByte] &= ~mask;

        MarkBufferDirty(vmbuf);
        cleared = true;
    }

    LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);

    return cleared;
}
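
/*
 * Illustrative caller pattern (a sketch, not code from this file): heapam.c
 * clears the bits inside the critical section that logs the heap page
 * modification, with the map page already pinned, roughly like this:
 *
 *      if (PageIsAllVisible(page))
 *      {
 *          PageClearAllVisible(page);
 *          visibilitymap_clear(relation, blkno, vmbuffer,
 *                              VISIBILITYMAP_VALID_BITS);
 *      }
 */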

/*
 * visibilitymap_pin - pin a map page for setting a bit
 *
 * Setting a bit in the visibility map is a two-phase operation. First, call
 * visibilitymap_pin, to pin the visibility map page containing the bit for
 * the heap page. Because that can require I/O to read the map page, you
 * shouldn't hold a lock on the heap page while doing that. Then, call
 * visibilitymap_set to actually set the bit.
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk.
 *
 * If the page doesn't exist in the map file yet, it is extended.
 */
void
visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*vmbuf))
    {
        if (BufferGetBlockNumber(*vmbuf) == mapBlock)
            return;

        ReleaseBuffer(*vmbuf);
    }
    *vmbuf = vm_readbuf(rel, mapBlock, true);
}

/*
 * visibilitymap_pin_ok - do we already have the correct page pinned?
 *
 * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. The return value indicates whether the buffer covers the
 * given heapBlk.
 */
bool
visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
}
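
/*
 * Illustrative retry pattern (a sketch, assuming a caller that discovers
 * only after locking the heap page that it needs the map page, per the
 * LOCKING notes above): the caller must drop the heap lock before doing the
 * I/O, then recheck with visibilitymap_pin_ok():
 *
 *      while (!visibilitymap_pin_ok(blkno, vmbuffer))
 *      {
 *          LockBuffer(heapbuf, BUFFER_LOCK_UNLOCK);
 *          visibilitymap_pin(relation, blkno, &vmbuffer);
 *          LockBuffer(heapbuf, BUFFER_LOCK_EXCLUSIVE);
 *      }
 */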

/*
 * visibilitymap_set - set bit(s) on a previously pinned page
 *
 * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
 * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the
 * one provided; in normal running, we generate a new XLOG record and set the
 * page LSN to that value (though the heap page's LSN may *not* be updated;
 * see below). cutoff_xid is the largest xmin on the page being marked
 * all-visible; it is needed for Hot Standby, and can be InvalidTransactionId
 * if the page contains no tuples. It can also be set to InvalidTransactionId
 * when a page that is already all-visible is being marked all-frozen.
 *
 * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
 * this function. Except in recovery, caller should also pass the heap
 * buffer. When checksums are enabled and we're not in recovery, we must add
 * the heap buffer to the WAL chain to protect it from being torn.
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O.
 */
void
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
                  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
                  uint8 flags)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    Page        page;
    uint8      *map;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
    Assert(InRecovery || PageIsAllVisible((Page) BufferGetPage(heapBuf)));
    Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);

    /* Must never set all_frozen bit without also setting all_visible bit */
    Assert(flags != VISIBILITYMAP_ALL_FROZEN);

    /* Check that we have the right heap page pinned, if present */
    if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
        elog(ERROR, "wrong heap buffer passed to visibilitymap_set");

    /* Check that we have the right VM page pinned */
    if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
        elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

    page = BufferGetPage(vmBuf);
    map = (uint8 *) PageGetContents(page);
    LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

    if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))
    {
        START_CRIT_SECTION();

        map[mapByte] |= (flags << mapOffset);
        MarkBufferDirty(vmBuf);

        if (RelationNeedsWAL(rel))
        {
            if (XLogRecPtrIsInvalid(recptr))
            {
                Assert(!InRecovery);
                recptr = log_heap_visible(rel, heapBuf, vmBuf, cutoff_xid, flags);

                /*
                 * If data checksums are enabled (or wal_log_hints=on), we
                 * need to protect the heap page from being torn.
                 *
                 * If not, then we must *not* update the heap page's LSN. In
                 * this case, the FPI for the heap page was omitted from the
                 * WAL record inserted above, so it would be incorrect to
                 * update the heap page's LSN.
                 */
                if (XLogHintBitIsNeeded())
                {
                    Page        heapPage = BufferGetPage(heapBuf);

                    PageSetLSN(heapPage, recptr);
                }
            }
            PageSetLSN(page, recptr);
        }

        END_CRIT_SECTION();
    }

    LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
}
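
/*
 * Illustrative caller pattern (a sketch, not VACUUM's exact code; the
 * if-condition is pseudo-code and visibility_cutoff_xid is a stand-in name
 * for the newest xmin on the page): with the map page pinned in advance and
 * the heap page locked, the two-phase protocol looks roughly like this:
 *
 *      vmbuffer = InvalidBuffer;
 *      visibilitymap_pin(relation, blkno, &vmbuffer);   (may do I/O)
 *      LockBuffer(heapbuf, BUFFER_LOCK_EXCLUSIVE);
 *      if (all tuples on the page are visible to everyone)
 *      {
 *          PageSetAllVisible(page);
 *          MarkBufferDirty(heapbuf);
 *          visibilitymap_set(relation, blkno, heapbuf, InvalidXLogRecPtr,
 *                            vmbuffer, visibility_cutoff_xid,
 *                            VISIBILITYMAP_ALL_VISIBLE);
 *      }
 *      LockBuffer(heapbuf, BUFFER_LOCK_UNLOCK);
 *      ReleaseBuffer(vmbuffer);
 */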

/*
 * visibilitymap_get_status - get status of bits
 *
 * Are all tuples on heapBlk visible to all transactions, or all-frozen,
 * according to the visibility map?
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an
 * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
 * releasing *vmbuf after it's done testing and setting bits.
 *
 * NOTE: This function is typically called without a lock on the heap page,
 * so somebody else could change the bit just after we look at it. In fact,
 * since we don't lock the visibility map page either, it's even possible that
 * someone else could have changed the bit just before we look at it, but yet
 * we might see the old value. It is the caller's responsibility to deal with
 * all concurrency issues!
 */
uint8
visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    char       *map;
    uint8       result;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*vmbuf))
    {
        if (BufferGetBlockNumber(*vmbuf) != mapBlock)
        {
            ReleaseBuffer(*vmbuf);
            *vmbuf = InvalidBuffer;
        }
    }

    if (!BufferIsValid(*vmbuf))
    {
        *vmbuf = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(*vmbuf))
            return false;
    }

    map = PageGetContents(BufferGetPage(*vmbuf));

    /*
     * A single byte read is atomic. There could be memory-ordering effects
     * here, but for performance reasons we make it the caller's job to worry
     * about that.
     */
    result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
    return result;
}
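
/*
 * Illustrative use (a sketch): index-only scans decide whether a heap fetch
 * is needed via the VM_ALL_VISIBLE() macro from access/visibilitymap.h,
 * which is a thin wrapper around this function:
 *
 *      if (VM_ALL_VISIBLE(rel, ItemPointerGetBlockNumber(tid), &vmbuffer))
 *          ... skip the heap fetch; the tuple is visible to everyone ...
 */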

/*
 * visibilitymap_count - count number of bits set in visibility map
 *
 * Note: we ignore the possibility of race conditions when the table is being
 * extended concurrently with the call. New pages added to the table aren't
 * going to be marked all-visible or all-frozen, so they won't affect the
 * result.
 */
void
visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
{
    BlockNumber mapBlock;
    BlockNumber nvisible = 0;
    BlockNumber nfrozen = 0;

    /* all_visible must be specified */
    Assert(all_visible);

    for (mapBlock = 0;; mapBlock++)
    {
        Buffer      mapBuffer;
        uint64     *map;
        int         i;

        /*
         * Read till we fall off the end of the map. We assume that any extra
         * bytes in the last page are zeroed, so we don't bother excluding
         * them from the count.
         */
        mapBuffer = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(mapBuffer))
            break;

        /*
         * We choose not to lock the page, since the result is going to be
         * immediately stale anyway if anyone is concurrently setting or
         * clearing bits, and we only really need an approximate value.
         */
        map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));

        StaticAssertStmt(MAPSIZE % sizeof(uint64) == 0,
                         "unsupported MAPSIZE");
        if (all_frozen == NULL)
        {
            for (i = 0; i < MAPSIZE / sizeof(uint64); i++)
                nvisible += pg_popcount64(map[i] & VISIBLE_MASK64);
        }
        else
        {
            for (i = 0; i < MAPSIZE / sizeof(uint64); i++)
            {
                nvisible += pg_popcount64(map[i] & VISIBLE_MASK64);
                nfrozen += pg_popcount64(map[i] & FROZEN_MASK64);
            }
        }

        ReleaseBuffer(mapBuffer);
    }

    *all_visible = nvisible;
    if (all_frozen)
        *all_frozen = nfrozen;
}
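
/*
 * Illustrative use (a sketch): VACUUM and ANALYZE refresh
 * pg_class.relallvisible with this function, e.g.
 *
 *      BlockNumber relallvisible;
 *
 *      visibilitymap_count(rel, &relallvisible, NULL);
 *
 * Passing NULL for all_frozen skips the frozen count, taking the cheaper
 * loop above.
 */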

/*
 * visibilitymap_prepare_truncate -
 *      prepare for truncation of the visibility map
 *
 * nheapblocks is the new size of the heap.
 *
 * Return the number of blocks of new visibility map.
 * If it's InvalidBlockNumber, there is nothing to truncate;
 * otherwise the caller is responsible for calling smgrtruncate()
 * to truncate the visibility map pages.
 */
BlockNumber
visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
{
    BlockNumber newnblocks;

    /* last remaining block, byte, and bit */
    BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
    uint32      truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
    uint8       truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
#endif

    /*
     * If no visibility map has been created yet for this relation, there's
     * nothing to truncate.
     */
    if (!smgrexists(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM))
        return InvalidBlockNumber;

    /*
     * Unless the new size is exactly at a visibility map page boundary, the
     * tail bits in the last remaining map page, representing truncated heap
     * blocks, need to be cleared. This is not only tidy, but also necessary
     * because we don't get a chance to clear the bits if the heap is extended
     * again.
     */
    if (truncByte != 0 || truncOffset != 0)
    {
        Buffer      mapBuffer;
        Page        page;
        char       *map;

        newnblocks = truncBlock + 1;

        mapBuffer = vm_readbuf(rel, truncBlock, false);
        if (!BufferIsValid(mapBuffer))
        {
            /* nothing to do, the file was already smaller */
            return InvalidBlockNumber;
        }

        page = BufferGetPage(mapBuffer);
        map = PageGetContents(page);

        LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);

        /* NO EREPORT(ERROR) from here till changes are logged */
        START_CRIT_SECTION();

        /* Clear out the unwanted bytes. */
        MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));

        /*----
         * Mask out the unwanted bits of the last remaining byte.
         *
         * ((1 << 0) - 1) = 00000000
         * ((1 << 1) - 1) = 00000001
         * ...
         * ((1 << 6) - 1) = 00111111
         * ((1 << 7) - 1) = 01111111
         *----
         */
        map[truncByte] &= (1 << truncOffset) - 1;

        /*
         * Truncation of a relation is WAL-logged at a higher level, and we
         * will be called at WAL replay. But if checksums are enabled, we need
         * to still write a WAL record to protect against a torn page, if the
         * page is flushed to disk before the truncation WAL record. We cannot
         * use MarkBufferDirtyHint here, because that will not dirty the page
         * during recovery.
         */
        MarkBufferDirty(mapBuffer);
        if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded())
            log_newpage_buffer(mapBuffer, false);

        END_CRIT_SECTION();

        UnlockReleaseBuffer(mapBuffer);
    }
    else
        newnblocks = truncBlock;

    if (smgrnblocks(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM) <= newnblocks)
    {
        /* nothing to do, the file was already smaller than requested size */
        return InvalidBlockNumber;
    }

    return newnblocks;
}
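
/*
 * Illustrative caller pattern (a sketch, not the exact caller code): the
 * storage layer uses the return value to decide whether to include the VM
 * fork in its smgrtruncate() call, roughly:
 *
 *      vm_nblocks = visibilitymap_prepare_truncate(rel, nblocks);
 *      if (BlockNumberIsValid(vm_nblocks))
 *          ... include VISIBILITYMAP_FORKNUM, truncated to vm_nblocks,
 *          among the forks passed to smgrtruncate() ...
 */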

/*
 * Read a visibility map page.
 *
 * If the page doesn't exist, InvalidBuffer is returned unless 'extend' is
 * true, in which case the visibility map file is extended first.
 */
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
    Buffer      buf;
    SMgrRelation reln;

    /*
     * Caution: re-using this smgr pointer could fail if the relcache entry
     * gets closed. It's safe as long as we only do smgr-level operations
     * between here and the last use of the pointer.
     */
    reln = RelationGetSmgr(rel);

    /*
     * If we haven't cached the size of the visibility map fork yet, check it
     * first.
     */
    if (reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
    {
        if (smgrexists(reln, VISIBILITYMAP_FORKNUM))
            smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
        else
            reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
    }

    /*
     * For reading we use ZERO_ON_ERROR mode, and initialize the page if
     * necessary. It's always safe to clear bits, so it's better to clear
     * corrupt pages than error out.
     *
     * We use the same path below to initialize pages when extending the
     * relation, as a concurrent extension can end up with vm_extend()
     * returning an already-initialized page.
     */
    if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
    {
        if (extend)
            buf = vm_extend(rel, blkno + 1);
        else
            return InvalidBuffer;
    }
    else
        buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
                                 RBM_ZERO_ON_ERROR, NULL);

    /*
     * Initializing the page when needed is trickier than it looks, because of
     * the possibility of multiple backends doing this concurrently, and our
     * desire to not uselessly take the buffer lock in the normal path where
     * the page is OK. We must take the lock to initialize the page, so
     * recheck page newness after we have the lock, in case someone else
     * already did it. Also, because we initially check PageIsNew with no
     * lock, it's possible to fall through and return the buffer while someone
     * else is still initializing the page (i.e., we might see pd_upper as set
     * but other page header fields are still zeroes). This is harmless for
     * callers that will take a buffer lock themselves, but some callers
     * inspect the page without any lock at all. The latter is OK only so
     * long as it doesn't depend on the page header having correct contents.
     * Current usage is safe because PageGetContents() does not require that.
     */
    if (PageIsNew(BufferGetPage(buf)))
    {
        LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
        if (PageIsNew(BufferGetPage(buf)))
            PageInit(BufferGetPage(buf), BLCKSZ, 0);
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    }
    return buf;
}

/*
 * Ensure that the visibility map fork is at least vm_nblocks long, extending
 * it if necessary with zeroed pages.
 */
static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
    Buffer      buf;

    buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
                              EB_CREATE_FORK_IF_NEEDED |
                              EB_CLEAR_SIZE_CACHE,
                              vm_nblocks,
                              RBM_ZERO_ON_ERROR);

    /*
     * Send a shared-inval message to force other backends to close any smgr
     * references they may have for this rel, which we are about to change.
     * This is a useful optimization because it means that backends don't have
     * to keep checking for creation or extension of the file, which happens
     * infrequently.
     */
    CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);

    return buf;
}