/*-------------------------------------------------------------------------
 *
 * nbtpage.c
 *	  BTree-specific page management code for the Postgres btree access
 *	  method.
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/nbtree/nbtpage.c
 *
 * NOTES
 *	  Postgres btree pages look like ordinary relation pages. The opaque
 *	  data at high addresses includes pointers to left and right siblings
 *	  and flag data describing page state. The first page in a btree, page
 *	  zero, is special -- it stores meta-information describing the tree.
 *	  Pages one and higher store the actual tree data.
 *
 *-------------------------------------------------------------------------
 */
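
/*
 * Rough sketch of a btree page, per the NOTES above (see bufpage.h for the
 * authoritative layout of an ordinary relation page):
 *
 *	+----------------+---------------------------+----------------------+
 *	| page header,   |        free space         | items (grow down),   |
 *	| item pointers  |                           | BTPageOpaqueData     |
 *	+----------------+---------------------------+----------------------+
 *	low addresses                                      high addresses
 *
 * BTPageOpaqueData is the "special space": it holds the left/right sibling
 * links and the page state flags.
 */
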
#include "postgres.h"

#include "access/nbtree.h"
#include "access/nbtxlog.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "miscadmin.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"

static BTMetaPageData *_bt_getmeta(Relation rel, Buffer metabuf);
static void _bt_log_reuse_page(Relation rel, Relation heaprel, BlockNumber blkno,
							   FullTransactionId safexid);
static void _bt_delitems_delete(Relation rel, Relation heaprel, Buffer buf,
								TransactionId snapshotConflictHorizon,
								OffsetNumber *deletable, int ndeletable,
								BTVacuumPosting *updatable, int nupdatable);
static char *_bt_delitems_update(BTVacuumPosting *updatable, int nupdatable,
								 OffsetNumber *updatedoffsets,
								 Size *updatedbuflen, bool needswal);
static bool _bt_mark_page_halfdead(Relation rel, Relation heaprel,
								   Buffer leafbuf, BTStack stack);
static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf,
									 BlockNumber scanblkno,
									 bool *rightsib_empty,
									 BTVacState *vstate);
static bool _bt_lock_subtree_parent(Relation rel, Relation heaprel,
									BlockNumber child, BTStack stack,
									Buffer *subtreeparent, OffsetNumber *poffset,
									BlockNumber *topparent,
									BlockNumber *topparentrightsib);
static void _bt_pendingfsm_add(BTVacState *vstate, BlockNumber target,
							   FullTransactionId safexid);

/*
 * _bt_initmetapage() -- Fill a page buffer with a correct metapage image
 */
void
_bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
				 bool allequalimage)
{
	BTMetaPageData *metad;
	BTPageOpaque metaopaque;

	_bt_pageinit(page, BLCKSZ);

	metad = BTPageGetMeta(page);
	metad->btm_magic = BTREE_MAGIC;
	metad->btm_version = BTREE_VERSION;
	metad->btm_root = rootbknum;
	metad->btm_level = level;
	metad->btm_fastroot = rootbknum;
	metad->btm_fastlevel = level;
	metad->btm_last_cleanup_num_delpages = 0;
	metad->btm_last_cleanup_num_heap_tuples = -1.0;
	metad->btm_allequalimage = allequalimage;

	metaopaque = BTPageGetOpaque(page);
	metaopaque->btpo_flags = BTP_META;

	/*
	 * Set pd_lower just past the end of the metadata. This is essential,
	 * because without doing so, metadata will be lost if xlog.c compresses
	 * the page.
	 */
	((PageHeader) page)->pd_lower =
		((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
}
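
/*
 * Example (illustrative sketch only): a caller that wants an empty-index
 * metapage image, with no root page yet, might do:
 *
 *		Page		metapage = (Page) palloc(BLCKSZ);
 *
 *		_bt_initmetapage(metapage, P_NONE, 0, allequalimage);
 */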

/*
 * _bt_upgrademetapage() -- Upgrade a meta-page from an old format to version
 *		3, the last version that can be updated without broadly affecting
 *		on-disk compatibility. (A REINDEX is required to upgrade to v4.)
 *
 *		This routine does purely in-memory image upgrade. Caller is
 *		responsible for locking, WAL-logging etc.
 */
void
_bt_upgrademetapage(Page page)
{
	BTMetaPageData *metad;
	BTPageOpaque metaopaque PG_USED_FOR_ASSERTS_ONLY;

	metad = BTPageGetMeta(page);
	metaopaque = BTPageGetOpaque(page);

	/* It must really be a meta page of an upgradable version */
	Assert(metaopaque->btpo_flags & BTP_META);
	Assert(metad->btm_version < BTREE_NOVAC_VERSION);
	Assert(metad->btm_version >= BTREE_MIN_VERSION);

	/* Set version number and fill extra fields added in version 3 */
	metad->btm_version = BTREE_NOVAC_VERSION;
	metad->btm_last_cleanup_num_delpages = 0;
	metad->btm_last_cleanup_num_heap_tuples = -1.0;
	/* Only a REINDEX can set this field */
	Assert(!metad->btm_allequalimage);
	metad->btm_allequalimage = false;

	/* Adjust pd_lower (see _bt_initmetapage() for details) */
	((PageHeader) page)->pd_lower =
		((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
}

/*
 * Get metadata from a share-locked buffer containing the metapage, while
 * performing standard sanity checks.
 *
 * Callers that cache the data returned here in a local cache should note
 * that an on-the-fly upgrade using _bt_upgrademetapage() can change the
 * version field and BTREE_NOVAC_VERSION specific fields without invalidating
 * the local cache.
 */
static BTMetaPageData *
_bt_getmeta(Relation rel, Buffer metabuf)
{
	Page		metapg;
	BTPageOpaque metaopaque;
	BTMetaPageData *metad;

	metapg = BufferGetPage(metabuf);
	metaopaque = BTPageGetOpaque(metapg);
	metad = BTPageGetMeta(metapg);

	/* sanity-check the metapage */
	if (!P_ISMETA(metaopaque) ||
		metad->btm_magic != BTREE_MAGIC)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("index \"%s\" is not a btree",
						RelationGetRelationName(rel))));

	if (metad->btm_version < BTREE_MIN_VERSION ||
		metad->btm_version > BTREE_VERSION)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("version mismatch in index \"%s\": file version %d, "
						"current version %d, minimal supported version %d",
						RelationGetRelationName(rel),
						metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));

	return metad;
}

/*
 * _bt_vacuum_needs_cleanup() -- Checks if index needs cleanup
 *
 * Called by btvacuumcleanup when btbulkdelete was never called because no
 * index tuples needed to be deleted.
 */
bool
_bt_vacuum_needs_cleanup(Relation rel, Relation heaprel)
{
	Buffer		metabuf;
	Page		metapg;
	BTMetaPageData *metad;
	uint32		btm_version;
	BlockNumber prev_num_delpages;

	/*
	 * Copy details from metapage to local variables quickly.
	 *
	 * Note that we deliberately avoid using the cached version of the
	 * metapage here.
	 */
	metabuf = _bt_getbuf(rel, heaprel, BTREE_METAPAGE, BT_READ);
	metapg = BufferGetPage(metabuf);
	metad = BTPageGetMeta(metapg);
	btm_version = metad->btm_version;

	if (btm_version < BTREE_NOVAC_VERSION)
	{
		/*
		 * Metapage needs to be dynamically upgraded to store fields that are
		 * only present when btm_version >= BTREE_NOVAC_VERSION
		 */
		_bt_relbuf(rel, metabuf);
		return true;
	}

	prev_num_delpages = metad->btm_last_cleanup_num_delpages;
	_bt_relbuf(rel, metabuf);

	/*
	 * Trigger cleanup in rare cases where prev_num_delpages exceeds 5% of
	 * the total size of the index. We can reasonably expect (though are not
	 * guaranteed) to be able to recycle this many pages if we decide to do a
	 * btvacuumscan call during the ongoing btvacuumcleanup. For further
	 * details see the nbtree/README section on placing deleted pages in the
	 * FSM.
	 */
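	/*
	 * Worked example: for a 10,000-block index, cleanup is triggered only
	 * once more than 10,000 / 20 = 500 deleted pages were recorded by the
	 * previous cleanup.
	 */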
	if (prev_num_delpages > 0 &&
		prev_num_delpages > RelationGetNumberOfBlocks(rel) / 20)
		return true;

	return false;
}

/*
 * _bt_set_cleanup_info() -- Update metapage for btvacuumcleanup.
 *
 * Called at the end of btvacuumcleanup, when num_delpages value has been
 * finalized.
 */
void
_bt_set_cleanup_info(Relation rel, Relation heaprel, BlockNumber num_delpages)
{
	Buffer		metabuf;
	Page		metapg;
	BTMetaPageData *metad;

	/*
	 * On-disk compatibility note: The btm_last_cleanup_num_delpages metapage
	 * field started out as a TransactionId field called btm_oldest_btpo_xact.
	 * Both "versions" are just uint32 fields. It was convenient to repurpose
	 * the field when we began to use 64-bit XIDs in deleted pages.
	 *
	 * It's possible that a pg_upgrade'd database will contain an XID value in
	 * what is now recognized as the metapage's btm_last_cleanup_num_delpages
	 * field. _bt_vacuum_needs_cleanup() may even believe that this value
	 * indicates that there are lots of pages that it needs to recycle, when
	 * in reality there are only one or two. The worst that can happen is
	 * that there will be a call to btvacuumscan a little earlier, which will
	 * set btm_last_cleanup_num_delpages to a sane value when we're called.
	 *
	 * Note also that the metapage's btm_last_cleanup_num_heap_tuples field is
	 * no longer used as of PostgreSQL 14. We set it to -1.0 on rewrite, just
	 * to be consistent.
	 */
	metabuf = _bt_getbuf(rel, heaprel, BTREE_METAPAGE, BT_READ);
	metapg = BufferGetPage(metabuf);
	metad = BTPageGetMeta(metapg);

	/* Don't miss chance to upgrade index/metapage when BTREE_MIN_VERSION */
	if (metad->btm_version >= BTREE_NOVAC_VERSION &&
		metad->btm_last_cleanup_num_delpages == num_delpages)
	{
		/* Usually means index continues to have num_delpages of 0 */
		_bt_relbuf(rel, metabuf);
		return;
	}

	/* trade in our read lock for a write lock */
	_bt_unlockbuf(rel, metabuf);
	_bt_lockbuf(rel, metabuf, BT_WRITE);

	START_CRIT_SECTION();

	/* upgrade meta-page if needed */
	if (metad->btm_version < BTREE_NOVAC_VERSION)
		_bt_upgrademetapage(metapg);

	/* update cleanup-related information */
	metad->btm_last_cleanup_num_delpages = num_delpages;
	metad->btm_last_cleanup_num_heap_tuples = -1.0;
	MarkBufferDirty(metabuf);

	/* write wal record if needed */
	if (RelationNeedsWAL(rel))
	{
		xl_btree_metadata md;
		XLogRecPtr	recptr;

		XLogBeginInsert();
		XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);

		Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
		md.version = metad->btm_version;
		md.root = metad->btm_root;
		md.level = metad->btm_level;
		md.fastroot = metad->btm_fastroot;
		md.fastlevel = metad->btm_fastlevel;
		md.last_cleanup_num_delpages = num_delpages;
		md.allequalimage = metad->btm_allequalimage;

		XLogRegisterBufData(0, (char *) &md, sizeof(xl_btree_metadata));

		recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_META_CLEANUP);

		PageSetLSN(metapg, recptr);
	}

	END_CRIT_SECTION();

	_bt_relbuf(rel, metabuf);
}

/*
 * _bt_getroot() -- Get the root page of the btree.
 *
 *		Since the root page can move around the btree file, we have to read
 *		its location from the metadata page, and then read the root page
 *		itself. If no root page exists yet, we have to create one.
 *
 *		The access type parameter (BT_READ or BT_WRITE) controls whether
 *		a new root page will be created or not. If access = BT_READ,
 *		and no root page exists, we just return InvalidBuffer. For
 *		BT_WRITE, we try to create the root page if it doesn't exist.
 *		NOTE that the returned root page will have only a read lock set
 *		on it even if access = BT_WRITE!
 *
 *		The returned page is not necessarily the true root --- it could be
 *		a "fast root" (a page that is alone in its level due to deletions).
 *		Also, if the root page is split while we are "in flight" to it,
 *		what we will return is the old root, which is now just the leftmost
 *		page on a probably-not-very-wide level. For most purposes this is
 *		as good as or better than the true root, so we do not bother to
 *		insist on finding the true root. We do, however, guarantee to
 *		return a live (not deleted or half-dead) page.
 *
 *		On successful return, the root page is pinned and read-locked.
 *		The metadata page is not locked or pinned on exit.
 */
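/*
 * Illustrative call pattern (sketch): per the contract above, BT_READ
 * callers must be prepared for an empty index:
 *
 *		rootbuf = _bt_getroot(rel, heaprel, BT_READ);
 *		if (!BufferIsValid(rootbuf))
 *			return;			/-* no root page means an empty index *-/
 */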
Buffer
_bt_getroot(Relation rel, Relation heaprel, int access)
{
	Buffer		metabuf;
	Buffer		rootbuf;
	Page		rootpage;
	BTPageOpaque rootopaque;
	BlockNumber rootblkno;
	uint32		rootlevel;
	BTMetaPageData *metad;

	/*
	 * Try to use previously-cached metapage data to find the root. This
	 * normally saves one buffer access per index search, which is a very
	 * helpful savings in bufmgr traffic and hence contention.
	 */
	if (rel->rd_amcache != NULL)
	{
		metad = (BTMetaPageData *) rel->rd_amcache;
		/* We shouldn't have cached it if any of these fail */
		Assert(metad->btm_magic == BTREE_MAGIC);
		Assert(metad->btm_version >= BTREE_MIN_VERSION);
		Assert(metad->btm_version <= BTREE_VERSION);
		Assert(!metad->btm_allequalimage ||
			   metad->btm_version > BTREE_NOVAC_VERSION);
		Assert(metad->btm_root != P_NONE);

		rootblkno = metad->btm_fastroot;
		Assert(rootblkno != P_NONE);
		rootlevel = metad->btm_fastlevel;

		rootbuf = _bt_getbuf(rel, heaprel, rootblkno, BT_READ);
		rootpage = BufferGetPage(rootbuf);
		rootopaque = BTPageGetOpaque(rootpage);

		/*
		 * Since the cache might be stale, we check the page more carefully
		 * here than normal. We *must* check that it's not deleted. If it's
		 * not alone on its level, then we reject too --- this may be overly
		 * paranoid but better safe than sorry. Note we don't check P_ISROOT,
		 * because that's not set in a "fast root".
		 */
		if (!P_IGNORE(rootopaque) &&
			rootopaque->btpo_level == rootlevel &&
			P_LEFTMOST(rootopaque) &&
			P_RIGHTMOST(rootopaque))
		{
			/* OK, accept cached page as the root */
			return rootbuf;
		}
		_bt_relbuf(rel, rootbuf);
		/* Cache is stale, throw it away */
		if (rel->rd_amcache)
			pfree(rel->rd_amcache);
		rel->rd_amcache = NULL;
	}

	metabuf = _bt_getbuf(rel, heaprel, BTREE_METAPAGE, BT_READ);
	metad = _bt_getmeta(rel, metabuf);

	/* if no root page initialized yet, do it */
	if (metad->btm_root == P_NONE)
	{
		Page		metapg;

		/* If access = BT_READ, caller doesn't want us to create root yet */
		if (access == BT_READ)
		{
			_bt_relbuf(rel, metabuf);
			return InvalidBuffer;
		}

		/* trade in our read lock for a write lock */
		_bt_unlockbuf(rel, metabuf);
		_bt_lockbuf(rel, metabuf, BT_WRITE);

		/*
		 * Race condition: if someone else initialized the metadata between
		 * the time we released the read lock and acquired the write lock, we
		 * must avoid doing it again.
		 */
		if (metad->btm_root != P_NONE)
		{
			/*
			 * Metadata initialized by someone else. In order to guarantee no
			 * deadlocks, we have to release the metadata page and start all
			 * over again. (Is that really true? But it's hardly worth trying
			 * to optimize this case.)
			 */
			_bt_relbuf(rel, metabuf);
			return _bt_getroot(rel, heaprel, access);
		}

		/*
		 * Get, initialize, write, and leave a lock of the appropriate type on
		 * the new root page. Since this is the first page in the tree, it's
		 * a leaf as well as the root.
		 */
		rootbuf = _bt_getbuf(rel, heaprel, P_NEW, BT_WRITE);
		rootblkno = BufferGetBlockNumber(rootbuf);
		rootpage = BufferGetPage(rootbuf);
		rootopaque = BTPageGetOpaque(rootpage);
		rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
		rootopaque->btpo_flags = (BTP_LEAF | BTP_ROOT);
		rootopaque->btpo_level = 0;
		rootopaque->btpo_cycleid = 0;
		/* Get raw page pointer for metapage */
		metapg = BufferGetPage(metabuf);

		/* NO ELOG(ERROR) till meta is updated */
		START_CRIT_SECTION();

		/* upgrade metapage if needed */
		if (metad->btm_version < BTREE_NOVAC_VERSION)
			_bt_upgrademetapage(metapg);

		metad->btm_root = rootblkno;
		metad->btm_level = 0;
		metad->btm_fastroot = rootblkno;
		metad->btm_fastlevel = 0;
		metad->btm_last_cleanup_num_delpages = 0;
		metad->btm_last_cleanup_num_heap_tuples = -1.0;

		MarkBufferDirty(rootbuf);
		MarkBufferDirty(metabuf);

		/* XLOG stuff */
		if (RelationNeedsWAL(rel))
		{
			xl_btree_newroot xlrec;
			XLogRecPtr	recptr;
			xl_btree_metadata md;

			XLogBeginInsert();
			XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
			XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);

			Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
			md.version = metad->btm_version;
			md.root = rootblkno;
			md.level = 0;
			md.fastroot = rootblkno;
			md.fastlevel = 0;
			md.last_cleanup_num_delpages = 0;
			md.allequalimage = metad->btm_allequalimage;

			XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata));

			xlrec.rootblk = rootblkno;
			xlrec.level = 0;

			XLogRegisterData((char *) &xlrec, SizeOfBtreeNewroot);

			recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT);

			PageSetLSN(rootpage, recptr);
			PageSetLSN(metapg, recptr);
		}

		END_CRIT_SECTION();

		/*
		 * swap root write lock for read lock. There is no danger of anyone
		 * else accessing the new root page while it's unlocked, since no one
		 * else knows where it is yet.
		 */
		_bt_unlockbuf(rel, rootbuf);
		_bt_lockbuf(rel, rootbuf, BT_READ);

		/* okay, metadata is correct, release lock on it without caching */
		_bt_relbuf(rel, metabuf);
	}
	else
	{
		rootblkno = metad->btm_fastroot;
		Assert(rootblkno != P_NONE);
		rootlevel = metad->btm_fastlevel;

		/*
		 * Cache the metapage data for next time
		 */
		rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
											 sizeof(BTMetaPageData));
		memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));

		/*
		 * We are done with the metapage; arrange to release it via first
		 * _bt_relandgetbuf call
		 */
		rootbuf = metabuf;

		for (;;)
		{
			rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
			rootpage = BufferGetPage(rootbuf);
			rootopaque = BTPageGetOpaque(rootpage);

			if (!P_IGNORE(rootopaque))
				break;

			/* it's dead, Jim. step right one page */
			if (P_RIGHTMOST(rootopaque))
				elog(ERROR, "no live root page found in index \"%s\"",
					 RelationGetRelationName(rel));
			rootblkno = rootopaque->btpo_next;
		}

		if (rootopaque->btpo_level != rootlevel)
			elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
				 rootblkno, RelationGetRelationName(rel),
				 rootopaque->btpo_level, rootlevel);
	}

	/*
	 * By here, we have a pin and read lock on the root page, and no lock set
	 * on the metadata page. Return the root page's buffer.
	 */
	return rootbuf;
}

/*
 * _bt_gettrueroot() -- Get the true root page of the btree.
 *
 *		This is the same as the BT_READ case of _bt_getroot(), except
 *		we follow the true-root link not the fast-root link.
 *
 * By the time we acquire lock on the root page, it might have been split and
 * not be the true root anymore. This is okay for the present uses of this
 * routine; we only really need to be able to move up at least one tree level
 * from whatever non-root page we were at. If we ever do need to lock the
 * one true root page, we could loop here, re-reading the metapage on each
 * failure. (Note that it wouldn't do to hold the lock on the metapage while
 * moving to the root --- that'd deadlock against any concurrent root split.)
 */
Buffer
_bt_gettrueroot(Relation rel, Relation heaprel)
{
	Buffer		metabuf;
	Page		metapg;
	BTPageOpaque metaopaque;
	Buffer		rootbuf;
	Page		rootpage;
	BTPageOpaque rootopaque;
	BlockNumber rootblkno;
	uint32		rootlevel;
	BTMetaPageData *metad;

	/*
	 * We don't try to use cached metapage data here, since (a) this path is
	 * not performance-critical, and (b) if we are here it suggests our cache
	 * is out-of-date anyway. In light of point (b), it's probably safest to
	 * actively flush any cached metapage info.
	 */
	if (rel->rd_amcache)
		pfree(rel->rd_amcache);
	rel->rd_amcache = NULL;

	metabuf = _bt_getbuf(rel, heaprel, BTREE_METAPAGE, BT_READ);
	metapg = BufferGetPage(metabuf);
	metaopaque = BTPageGetOpaque(metapg);
	metad = BTPageGetMeta(metapg);

	if (!P_ISMETA(metaopaque) ||
		metad->btm_magic != BTREE_MAGIC)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("index \"%s\" is not a btree",
						RelationGetRelationName(rel))));

	if (metad->btm_version < BTREE_MIN_VERSION ||
		metad->btm_version > BTREE_VERSION)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("version mismatch in index \"%s\": file version %d, "
						"current version %d, minimal supported version %d",
						RelationGetRelationName(rel),
						metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION)));

	/* if no root page initialized yet, fail */
	if (metad->btm_root == P_NONE)
	{
		_bt_relbuf(rel, metabuf);
		return InvalidBuffer;
	}

	rootblkno = metad->btm_root;
	rootlevel = metad->btm_level;

	/*
	 * We are done with the metapage; arrange to release it via first
	 * _bt_relandgetbuf call
	 */
	rootbuf = metabuf;

	for (;;)
	{
		rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
		rootpage = BufferGetPage(rootbuf);
		rootopaque = BTPageGetOpaque(rootpage);

		if (!P_IGNORE(rootopaque))
			break;

		/* it's dead, Jim. step right one page */
		if (P_RIGHTMOST(rootopaque))
			elog(ERROR, "no live root page found in index \"%s\"",
				 RelationGetRelationName(rel));
		rootblkno = rootopaque->btpo_next;
	}

	if (rootopaque->btpo_level != rootlevel)
		elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
			 rootblkno, RelationGetRelationName(rel),
			 rootopaque->btpo_level, rootlevel);

	return rootbuf;
}

/*
 * _bt_getrootheight() -- Get the height of the btree search tree.
 *
 *		We return the level (counting from zero) of the current fast root.
 *		This represents the number of tree levels we'd have to descend
 *		through to start any btree index search.
 *
 *		This is used by the planner for cost-estimation purposes. Since it's
 *		only an estimate, slightly-stale data is fine, hence we don't worry
 *		about updating previously cached data.
 */
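/*
 * For example, an index whose root is a leaf page has height 0, while a
 * two-level tree (root plus a leaf level) has height 1; each additional
 * level adds one page visit to every descent.
 */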
int
_bt_getrootheight(Relation rel, Relation heaprel)
{
	BTMetaPageData *metad;

	if (rel->rd_amcache == NULL)
	{
		Buffer		metabuf;

		metabuf = _bt_getbuf(rel, heaprel, BTREE_METAPAGE, BT_READ);
		metad = _bt_getmeta(rel, metabuf);

		/*
		 * If there's no root page yet, _bt_getroot() doesn't expect a cache
		 * to be made, so just stop here and report the index height is zero.
		 * (XXX perhaps _bt_getroot() should be changed to allow this case.)
		 */
		if (metad->btm_root == P_NONE)
		{
			_bt_relbuf(rel, metabuf);
			return 0;
		}

		/*
		 * Cache the metapage data for next time
		 */
		rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
											 sizeof(BTMetaPageData));
		memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
		_bt_relbuf(rel, metabuf);
	}

	/* Get cached page */
	metad = (BTMetaPageData *) rel->rd_amcache;
	/* We shouldn't have cached it if any of these fail */
	Assert(metad->btm_magic == BTREE_MAGIC);
	Assert(metad->btm_version >= BTREE_MIN_VERSION);
	Assert(metad->btm_version <= BTREE_VERSION);
	Assert(!metad->btm_allequalimage ||
		   metad->btm_version > BTREE_NOVAC_VERSION);
	Assert(metad->btm_fastroot != P_NONE);

	return metad->btm_fastlevel;
}

/*
 * _bt_metaversion() -- Get version/status info from metapage.
 *
 *		Sets caller's *heapkeyspace and *allequalimage arguments using data
 *		from the B-Tree metapage (could be a locally-cached version). This
 *		information needs to be stashed in the insertion scankey, so we
 *		provide a single function that fetches both at once.
 *
 *		This is used to determine the rules that must be used to descend a
 *		btree. Version 4 indexes treat heap TID as a tiebreaker attribute.
 *		pg_upgrade'd version 3 indexes need extra steps to preserve reasonable
 *		performance when inserting a new BTScanInsert-wise duplicate tuple
 *		among many leaf pages already full of such duplicates.
 *
 *		Also sets allequalimage field, which indicates whether or not it is
 *		safe to apply deduplication. We rely on the assumption that
 *		btm_allequalimage will be zero'ed on heapkeyspace indexes that were
 *		pg_upgrade'd from Postgres 12.
 */
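/*
 * Typical call pattern (sketch; "key" stands for a caller's BTScanInsert):
 *
 *		_bt_metaversion(rel, heaprel, &key->heapkeyspace, &key->allequalimage);
 */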
void
_bt_metaversion(Relation rel, Relation heaprel, bool *heapkeyspace, bool *allequalimage)
{
	BTMetaPageData *metad;

	if (rel->rd_amcache == NULL)
	{
		Buffer		metabuf;

		metabuf = _bt_getbuf(rel, heaprel, BTREE_METAPAGE, BT_READ);
		metad = _bt_getmeta(rel, metabuf);

		/*
		 * If there's no root page yet, _bt_getroot() doesn't expect a cache
		 * to be made, so just stop here. (XXX perhaps _bt_getroot() should
		 * be changed to allow this case.)
		 */
		if (metad->btm_root == P_NONE)
		{
			*heapkeyspace = metad->btm_version > BTREE_NOVAC_VERSION;
			*allequalimage = metad->btm_allequalimage;

			_bt_relbuf(rel, metabuf);
			return;
		}

		/*
		 * Cache the metapage data for next time
		 *
		 * An on-the-fly version upgrade performed by _bt_upgrademetapage()
		 * can change the nbtree version for an index without invalidating any
		 * local cache. This is okay because it can only happen when moving
		 * from version 2 to version 3, both of which are !heapkeyspace
		 * versions.
		 */
		rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
											 sizeof(BTMetaPageData));
		memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
		_bt_relbuf(rel, metabuf);
	}

	/* Get cached page */
	metad = (BTMetaPageData *) rel->rd_amcache;
	/* We shouldn't have cached it if any of these fail */
	Assert(metad->btm_magic == BTREE_MAGIC);
	Assert(metad->btm_version >= BTREE_MIN_VERSION);
	Assert(metad->btm_version <= BTREE_VERSION);
	Assert(!metad->btm_allequalimage ||
		   metad->btm_version > BTREE_NOVAC_VERSION);
	Assert(metad->btm_fastroot != P_NONE);

	*heapkeyspace = metad->btm_version > BTREE_NOVAC_VERSION;
	*allequalimage = metad->btm_allequalimage;
}

/*
 * _bt_checkpage() -- Verify that a freshly-read page looks sane.
 */
void
_bt_checkpage(Relation rel, Buffer buf)
{
	Page		page = BufferGetPage(buf);

	/*
	 * ReadBuffer verifies that every newly-read page passes
	 * PageHeaderIsValid, which means it either contains a reasonably sane
	 * page header or is all-zero. We have to defend against the all-zero
	 * case, however.
	 */
	if (PageIsNew(page))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("index \"%s\" contains unexpected zero page at block %u",
						RelationGetRelationName(rel),
						BufferGetBlockNumber(buf)),
				 errhint("Please REINDEX it.")));

	/*
	 * Additionally check that the special area looks sane.
	 */
	if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BTPageOpaqueData)))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("index \"%s\" contains corrupted page at block %u",
						RelationGetRelationName(rel),
						BufferGetBlockNumber(buf)),
				 errhint("Please REINDEX it.")));
}

/*
 * Log the reuse of a page from the FSM.
 */
static void
_bt_log_reuse_page(Relation rel, Relation heaprel, BlockNumber blkno,
				   FullTransactionId safexid)
{
	xl_btree_reuse_page xlrec_reuse;

	/*
	 * Note that we don't register the buffer with the record, because this
	 * operation doesn't modify the page. This record only exists to provide
	 * a conflict point for Hot Standby.
	 */

	/* XLOG stuff */
	xlrec_reuse.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
	xlrec_reuse.locator = rel->rd_locator;
	xlrec_reuse.block = blkno;
	xlrec_reuse.snapshotConflictHorizon = safexid;

	XLogBeginInsert();
	XLogRegisterData((char *) &xlrec_reuse, SizeOfBtreeReusePage);

	XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE);
}

/*
 * _bt_getbuf() -- Get a buffer by block number for read or write.
 *
 *		blkno == P_NEW means to get an unallocated index page. The page
 *		will be initialized before returning it.
 *
 *		The general rule in nbtree is that it's never okay to access a
 *		page without holding both a buffer pin and a buffer lock on
 *		the page's buffer.
 *
 *		When this routine returns, the appropriate lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned"). Also, we apply
 *		_bt_checkpage to sanity-check the page (except in P_NEW case),
 *		and perform Valgrind client requests that help Valgrind detect
 *		unsafe page accesses.
 *
 *		Note: raw LockBuffer() calls are disallowed in nbtree; all
 *		buffer lock requests need to go through wrapper functions such
 *		as _bt_lockbuf().
 */
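/*
 * Example (sketch): typical read access to an existing page, released with
 * _bt_relbuf() when done:
 *
 *		buf = _bt_getbuf(rel, heaprel, blkno, BT_READ);
 *		page = BufferGetPage(buf);
 *		...
 *		_bt_relbuf(rel, buf);
 */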
Buffer
_bt_getbuf(Relation rel, Relation heaprel, BlockNumber blkno, int access)
{
	Buffer		buf;

	if (blkno != P_NEW)
	{
		/* Read an existing block of the relation */
		buf = ReadBuffer(rel, blkno);
		_bt_lockbuf(rel, buf, access);
		_bt_checkpage(rel, buf);
	}
	else
	{
		Page		page;

		Assert(access == BT_WRITE);

		/*
		 * First see if the FSM knows of any free pages.
		 *
		 * We can't trust the FSM's report unreservedly; we have to check that
		 * the page is still free. (For example, an already-free page could
		 * have been re-used between the time the last VACUUM scanned it and
		 * the time the VACUUM made its FSM updates.)
		 *
		 * In fact, it's worse than that: we can't even assume that it's safe
		 * to take a lock on the reported page. If somebody else has a lock
		 * on it, or even worse our own caller does, we could deadlock. (The
		 * own-caller scenario is actually not improbable. Consider an index
		 * on a serial or timestamp column. Nearly all splits will be at the
		 * rightmost page, so it's entirely likely that _bt_split will call us
		 * while holding a lock on the page most recently acquired from FSM. A
		 * VACUUM running concurrently with the previous split could well have
		 * placed that page back in FSM.)
		 *
		 * To get around that, we ask for only a conditional lock on the
		 * reported page. If we fail, then someone else is using the page,
		 * and we may reasonably assume it's not free. (If we happen to be
		 * wrong, the worst consequence is the page will be lost to use till
		 * the next VACUUM, which is no big problem.)
		 */
		for (;;)
		{
			blkno = GetFreeIndexPage(rel);
			if (blkno == InvalidBlockNumber)
				break;
			buf = ReadBuffer(rel, blkno);
			if (_bt_conditionallockbuf(rel, buf))
			{
				page = BufferGetPage(buf);

				/*
				 * It's possible to find an all-zeroes page in an index. For
				 * example, a backend might successfully extend the relation
				 * by one page and then crash before it is able to make a WAL
				 * entry for adding the page. If we find a zeroed page then
				 * reclaim it immediately.
				 */
				if (PageIsNew(page))
				{
					/* Okay to use page. Initialize and return it. */
					_bt_pageinit(page, BufferGetPageSize(buf));
					return buf;
				}

				if (BTPageIsRecyclable(page, heaprel))
				{
					/*
					 * If we are generating WAL for Hot Standby then create a
					 * WAL record that will allow us to conflict with queries
					 * running on standby, in case they have snapshots older
					 * than the page's safexid value
					 */
					if (XLogStandbyInfoActive() && RelationNeedsWAL(rel))
						_bt_log_reuse_page(rel, heaprel, blkno,
										   BTPageGetDeleteXid(page));

					/* Okay to use page. Re-initialize and return it. */
					_bt_pageinit(page, BufferGetPageSize(buf));
					return buf;
				}
				elog(DEBUG2, "FSM returned nonrecyclable page");
				_bt_relbuf(rel, buf);
			}
			else
			{
				elog(DEBUG2, "FSM returned nonlockable page");
				/* couldn't get lock, so just drop pin */
				ReleaseBuffer(buf);
			}
		}

		/*
		 * Extend the relation by one page. Need to use RBM_ZERO_AND_LOCK or
		 * we risk a race condition against btvacuumscan --- see comments
		 * therein. This forces us to repeat the valgrind request that
		 * _bt_lockbuf() otherwise would make, as we can't use _bt_lockbuf()
		 * without introducing a race.
		 */
		buf = ExtendBufferedRel(EB_REL(rel), MAIN_FORKNUM, NULL,
								EB_LOCK_FIRST);
		if (!RelationUsesLocalBuffers(rel))
			VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);

		/* Initialize the new page before returning it */
		page = BufferGetPage(buf);
		Assert(PageIsNew(page));
		_bt_pageinit(page, BufferGetPageSize(buf));
	}

	/* ref count and lock type are correct */
	return buf;
}

/*
 * _bt_relandgetbuf() -- release a locked buffer and get another one.
 *
 * This is equivalent to _bt_relbuf followed by _bt_getbuf, with the
 * exception that blkno may not be P_NEW. Also, if obuf is InvalidBuffer
 * then it reduces to just _bt_getbuf; allowing this case simplifies some
 * callers.
 *
 * The original motivation for using this was to avoid two entries to the
 * bufmgr when one would do. However, now it's mainly just a notational
 * convenience. The only case where it saves work over _bt_relbuf/_bt_getbuf
 * is when the target page is the same one already in the buffer.
 */
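/*
 * Example (sketch): stepping right to a sibling page swaps the old buffer
 * for the new one in a single call:
 *
 *		buf = _bt_relandgetbuf(rel, buf, opaque->btpo_next, BT_READ);
 */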
Buffer
_bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
{
	Buffer		buf;

	Assert(blkno != P_NEW);
	if (BufferIsValid(obuf))
		_bt_unlockbuf(rel, obuf);
	buf = ReleaseAndReadBuffer(obuf, rel, blkno);
	_bt_lockbuf(rel, buf, access);

	_bt_checkpage(rel, buf);
	return buf;
}

/*
 * _bt_relbuf() -- release a locked buffer.
 *
 * Lock and pin (refcount) are both dropped.
 */
void
_bt_relbuf(Relation rel, Buffer buf)
{
	_bt_unlockbuf(rel, buf);
	ReleaseBuffer(buf);
}

/*
 * _bt_lockbuf() -- lock a pinned buffer.
 *
 * Lock is acquired without acquiring another pin. This is like a raw
 * LockBuffer() call, but performs extra steps needed by Valgrind.
 *
 * Note: Caller may need to call _bt_checkpage() with buf when pin on buf
 * wasn't originally acquired in _bt_getbuf() or _bt_relandgetbuf().
 */
void
_bt_lockbuf(Relation rel, Buffer buf, int access)
{
	/* LockBuffer() asserts that pin is held by this backend */
	LockBuffer(buf, access);

	/*
	 * It doesn't matter that _bt_unlockbuf() won't get called in the event of
	 * an nbtree error (e.g. a unique violation error). That won't cause
	 * Valgrind false positives.
	 *
	 * The nbtree client requests are superimposed on top of the bufmgr.c
	 * buffer pin client requests. In the event of an nbtree error the buffer
	 * will certainly get marked as defined when the backend once again
	 * acquires its first pin on the buffer. (Of course, if the backend never
	 * touches the buffer again then it doesn't matter that it remains
	 * non-accessible to Valgrind.)
	 *
	 * Note: When an IndexTuple C pointer gets computed using an ItemId read
	 * from a page while a lock was held, the C pointer becomes unsafe to
	 * dereference forever as soon as the lock is released. Valgrind can only
	 * detect cases where the pointer gets dereferenced with no _current_
	 * lock/pin held, though.
	 */
	if (!RelationUsesLocalBuffers(rel))
		VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);
}

/*
 * _bt_unlockbuf() -- unlock a pinned buffer.
 */
void
_bt_unlockbuf(Relation rel, Buffer buf)
{
	/*
	 * Buffer is pinned and locked, which means that it is expected to be
	 * defined and addressable. Check that proactively.
	 */
	VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);

	/* LockBuffer() asserts that pin is held by this backend */
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);

	if (!RelationUsesLocalBuffers(rel))
		VALGRIND_MAKE_MEM_NOACCESS(BufferGetPage(buf), BLCKSZ);
}

/*
 * _bt_conditionallockbuf() -- conditionally BT_WRITE lock pinned
 * buffer.
 *
 * Note: Caller may need to call _bt_checkpage() with buf when pin on buf
 * wasn't originally acquired in _bt_getbuf() or _bt_relandgetbuf().
 */
bool
_bt_conditionallockbuf(Relation rel, Buffer buf)
{
	/* ConditionalLockBuffer() asserts that pin is held by this backend */
	if (!ConditionalLockBuffer(buf))
		return false;

	if (!RelationUsesLocalBuffers(rel))
		VALGRIND_MAKE_MEM_DEFINED(BufferGetPage(buf), BLCKSZ);

	return true;
}

/*
 * _bt_upgradelockbufcleanup() -- upgrade lock to a full cleanup lock.
 */
void
_bt_upgradelockbufcleanup(Relation rel, Buffer buf)
{
	/*
	 * Buffer is pinned and locked, which means that it is expected to be
	 * defined and addressable. Check that proactively.
	 */
	VALGRIND_CHECK_MEM_IS_DEFINED(BufferGetPage(buf), BLCKSZ);

	/* LockBuffer() asserts that pin is held by this backend */
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	LockBufferForCleanup(buf);
}

/*
 * _bt_pageinit() -- Initialize a new page.
 *
 * On return, the page header is initialized; data space is empty;
 * special space is zeroed out.
 */
void
_bt_pageinit(Page page, Size size)
{
	PageInit(page, size, sizeof(BTPageOpaqueData));
}

/*
 * Delete item(s) from a btree leaf page during VACUUM.
 *
 * This routine assumes that the caller already has a full cleanup lock on
 * the buffer. Also, the given deletable and updatable arrays *must* be
 * sorted in ascending order.
 *
 * Routine deals with deleting TIDs when some (but not all) of the heap TIDs
 * in an existing posting list item are to be removed. This works by
 * updating/overwriting an existing item with caller's new version of the item
 * (a version that lacks the TIDs that are to be deleted).
 *
 * We record VACUUMs and b-tree deletes differently in WAL. Deletes must
 * generate their own snapshotConflictHorizon directly from the tableam,
 * whereas VACUUMs rely on the initial VACUUM table scan performing
 * WAL-logging that takes care of the issue for the table's indexes
 * indirectly. Also, we remove the VACUUM cycle ID from pages, which b-tree
 * deletes don't do.
 */
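/*
 * Example: given a posting list tuple whose heap TIDs are {t1, t2, t3}, of
 * which only t2 must be removed, the caller passes an updatable entry whose
 * replacement tuple contains just {t1, t3}; the original tuple is then
 * overwritten in place via PageIndexTupleOverwrite().
 */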
1150 : void
4760 simon 1151 CBC 16990 : _bt_delitems_vacuum(Relation rel, Buffer buf,
1138 pg 1152 ECB : OffsetNumber *deletable, int ndeletable,
1153 : BTVacuumPosting *updatable, int nupdatable)
1154 : {
2545 kgrittn 1155 GIC 16990 : Page page = BufferGetPage(buf);
1156 : BTPageOpaque opaque;
816 pg 1157 CBC 16990 : bool needswal = RelationNeedsWAL(rel);
1138 pg 1158 GIC 16990 : char *updatedbuf = NULL;
1159 16990 : Size updatedbuflen = 0;
1160 : OffsetNumber updatedoffsets[MaxIndexTuplesPerPage];
1161 :
1162 : /* Shouldn't be called unless there's something to do */
1163 16990 : Assert(ndeletable > 0 || nupdatable > 0);
1164 :
1165 : /* Generate new version of posting lists without deleted TIDs */
816 1166 16990 : if (nupdatable > 0)
1167 2875 : updatedbuf = _bt_delitems_update(updatable, nupdatable,
1168 : updatedoffsets, &updatedbuflen,
1169 : needswal);
1170 :
7202 tgl 1171 ECB : /* No ereport(ERROR) until changes are logged */
8122 tgl 1172 GIC 16990 : START_CRIT_SECTION();
7938 tgl 1173 ECB :
1174 : /*
1175 : * Handle posting tuple updates.
1176 : *
1138 pg 1177 : * Deliberately do this before handling simple deletes. If we did it the
1178 : * other way around (i.e. WAL record order -- simple deletes before
1179 : * updates) then we'd have to make compensating changes to the 'updatable'
1180 : * array of offset numbers.
1138 pg 1181 EUB : *
1182 : * PageIndexTupleOverwrite() won't unset each item's LP_DEAD bit when it
1183 : * happens to already be set. It's important that we not interfere with
1184 : * any future simple index tuple deletion operations.
1185 : */
1138 pg 1186 CBC 91149 : for (int i = 0; i < nupdatable; i++)
1138 pg 1187 ECB : {
1138 pg 1188 GIC 74159 : OffsetNumber updatedoffset = updatedoffsets[i];
1189 : IndexTuple itup;
1190 : Size itemsz;
1191 :
1192 74159 : itup = updatable[i]->itup;
1138 pg 1193 CBC 74159 : itemsz = MAXALIGN(IndexTupleSize(itup));
1194 74159 : if (!PageIndexTupleOverwrite(page, updatedoffset, (Item) itup,
1195 : itemsz))
1138 pg 1196 UIC 0 : elog(PANIC, "failed to update partially dead item in block %u of index \"%s\"",
1197 : BufferGetBlockNumber(buf), RelationGetRelationName(rel));
1198 : }
1199 :
1200 : /* Now handle simple deletes of entire tuples */
1138 pg 1201 GIC 16990 : if (ndeletable > 0)
1202 15774 : PageIndexMultiDelete(page, deletable, ndeletable);
1203 :
6180 tgl 1204 ECB : /*
1205 : * We can clear the vacuum cycle ID since this page has certainly been
6031 bruce 1206 : * processed by the current vacuum scan.
1207 : */
373 michael 1208 GIC 16990 : opaque = BTPageGetOpaque(page);
6180 tgl 1209 CBC 16990 : opaque->btpo_cycleid = 0;
1210 :
1211 : /*
1212 : * Clear the BTP_HAS_GARBAGE page flag.
1213 : *
873 pg 1214 ECB : * This flag indicates the presence of LP_DEAD items on the page (though
816 1215 : * not reliably). Note that we only rely on it with pg_upgrade'd
1216 : * !heapkeyspace indexes. That's why clearing it here won't usually
561 1217 : * interfere with simple index tuple deletion.
6102 tgl 1218 : */
6102 tgl 1219 CBC 16990 : opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
1220 :
4156 simon 1221 16990 : MarkBufferDirty(buf);
6218 tgl 1222 ECB :
1223 : /* XLOG stuff */
816 pg 1224 GIC 16990 : if (needswal)
8222 vadim4o 1225 ECB : {
1226 : XLogRecPtr recptr;
4760 simon 1227 : xl_btree_vacuum xlrec_vacuum;
1228 :
1207 pg 1229 CBC 16989 : xlrec_vacuum.ndeleted = ndeletable;
1138 pg 1230 GIC 16989 : xlrec_vacuum.nupdated = nupdatable;
1231 :
3062 heikki.linnakangas 1232 CBC 16989 : XLogBeginInsert();
3062 heikki.linnakangas 1233 GIC 16989 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
3062 heikki.linnakangas 1234 CBC 16989 : XLogRegisterData((char *) &xlrec_vacuum, SizeOfBtreeVacuum);
1235 :
1138 pg 1236 GIC 16989 : if (ndeletable > 0)
1138 pg 1237 CBC 15773 : XLogRegisterBufData(0, (char *) deletable,
1238 : ndeletable * sizeof(OffsetNumber));
1239 :
1240 16989 : if (nupdatable > 0)
1138 pg 1241 ECB : {
1138 pg 1242 GIC 2875 : XLogRegisterBufData(0, (char *) updatedoffsets,
1138 pg 1243 ECB : nupdatable * sizeof(OffsetNumber));
1138 pg 1244 CBC 2875 : XLogRegisterBufData(0, updatedbuf, updatedbuflen);
1138 pg 1245 ECB : }
1246 :
3062 heikki.linnakangas 1247 GIC 16989 : recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM);
1248 :
4156 simon 1249 16989 : PageSetLSN(page, recptr);
1250 : }
1251 :
4760 1252 16990 : END_CRIT_SECTION();
1253 :
1254 : /* can't leak memory here */
1138 pg 1255 16990 : if (updatedbuf != NULL)
1256 2875 : pfree(updatedbuf);
1257 : /* free tuples allocated within _bt_delitems_update() */
1258 91149 : for (int i = 0; i < nupdatable; i++)
1259 74159 : pfree(updatable[i]->itup);
4760 simon 1260 16990 : }
1261 :
1262 : /*
1263 : * Delete item(s) from a btree leaf page during single-page cleanup.
1264 : *
1265 : * This routine assumes that the caller has pinned and write locked the
816 pg 1266 ECB : * buffer. Also, the given deletable and updatable arrays *must* be sorted in
1267 : * ascending order.
1268 : *
1269 : * Routine deals with deleting TIDs when some (but not all) of the heap TIDs
1270 : * in an existing posting list item are to be removed. This works by
1271 : * updating/overwriting an existing item with caller's new version of the item
1272 : * (a version that lacks the TIDs that are to be deleted).
4065 tgl 1273 : *
1274 : * This is nearly the same as _bt_delitems_vacuum as far as what it does to
1275 : * the page, but it needs its own snapshotConflictHorizon (caller gets this
1276 : * from tableam). This is used by the REDO routine to generate recovery
1277 : * conflicts. The other difference is that only _bt_delitems_vacuum will
1278 : * clear page's VACUUM cycle ID.
1279 : */
1280 : static void
8 andres 1281 GNC 5226 : _bt_delitems_delete(Relation rel, Relation heaprel, Buffer buf,
1282 : TransactionId snapshotConflictHorizon,
1192 pg 1283 ECB : OffsetNumber *deletable, int ndeletable,
798 1284 : BTVacuumPosting *updatable, int nupdatable)
1285 : {
2545 kgrittn 1286 GIC 5226 : Page page = BufferGetPage(buf);
1287 : BTPageOpaque opaque;
816 pg 1288 5226 : bool needswal = RelationNeedsWAL(rel);
816 pg 1289 CBC 5226 : char *updatedbuf = NULL;
816 pg 1290 GIC 5226 : Size updatedbuflen = 0;
1291 : OffsetNumber updatedoffsets[MaxIndexTuplesPerPage];
4760 simon 1292 ECB :
1293 : /* Shouldn't be called unless there's something to do */
816 pg 1294 CBC 5226 : Assert(ndeletable > 0 || nupdatable > 0);
1295 :
1296 : /* Generate new versions of posting lists without deleted TIDs */
816 pg 1297 GIC 5226 : if (nupdatable > 0)
816 pg 1298 CBC 570 : updatedbuf = _bt_delitems_update(updatable, nupdatable,
816 pg 1299 ECB : updatedoffsets, &updatedbuflen,
1300 : needswal);
1301 :
4760 simon 1302 EUB : /* No ereport(ERROR) until changes are logged */
4760 simon 1303 GIC 5226 : START_CRIT_SECTION();
1304 :
1305 : /* Handle updates and deletes just like _bt_delitems_vacuum */
816 pg 1306 CBC 12179 : for (int i = 0; i < nupdatable; i++)
816 pg 1307 ECB : {
816 pg 1308 GIC 6953 : OffsetNumber updatedoffset = updatedoffsets[i];
1309 : IndexTuple itup;
1310 : Size itemsz;
1311 :
1312 6953 : itup = updatable[i]->itup;
816 pg 1313 CBC 6953 : itemsz = MAXALIGN(IndexTupleSize(itup));
816 pg 1314 GIC 6953 : if (!PageIndexTupleOverwrite(page, updatedoffset, (Item) itup,
1315 : itemsz))
816 pg 1316 UIC 0 : elog(PANIC, "failed to update partially dead item in block %u of index \"%s\"",
1317 : BufferGetBlockNumber(buf), RelationGetRelationName(rel));
1318 : }
1319 :
816 pg 1320 GIC 5226 : if (ndeletable > 0)
1321 5183 : PageIndexMultiDelete(page, deletable, ndeletable);
4760 simon 1322 ECB :
1323 : /*
816 pg 1324 : * Unlike _bt_delitems_vacuum, we *must not* clear the vacuum cycle ID at
1325 : * this point. The VACUUM command alone controls vacuum cycle IDs.
1326 : */
373 michael 1327 CBC 5226 : opaque = BTPageGetOpaque(page);
1328 :
1329 : /*
1330 : * Clear the BTP_HAS_GARBAGE page flag.
1331 : *
873 pg 1332 ECB : * This flag indicates the presence of LP_DEAD items on the page (though
816 1333 : * not reliably). Note that we only rely on it with pg_upgrade'd
873 1334 : * !heapkeyspace indexes.
1335 : */
4760 simon 1336 GIC 5226 : opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
4760 simon 1337 ECB :
4760 simon 1338 CBC 5226 : MarkBufferDirty(buf);
4760 simon 1339 ECB :
1340 : /* XLOG stuff */
816 pg 1341 CBC 5226 : if (needswal)
4760 simon 1342 ECB : {
1343 : XLogRecPtr recptr;
1344 : xl_btree_delete xlrec_delete;
1345 :
7 andres 1346 GNC 5202 : xlrec_delete.isCatalogRel = RelationIsAccessibleInLogicalDecoding(heaprel);
143 pg 1347 5202 : xlrec_delete.snapshotConflictHorizon = snapshotConflictHorizon;
1192 pg 1348 CBC 5202 : xlrec_delete.ndeleted = ndeletable;
816 pg 1349 GIC 5202 : xlrec_delete.nupdated = nupdatable;
4760 simon 1350 ECB :
3062 heikki.linnakangas 1351 GIC 5202 : XLogBeginInsert();
1352 5202 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
3062 heikki.linnakangas 1353 CBC 5202 : XLogRegisterData((char *) &xlrec_delete, SizeOfBtreeDelete);
1354 :
816 pg 1355 5202 : if (ndeletable > 0)
816 pg 1356 GIC 5159 : XLogRegisterBufData(0, (char *) deletable,
1357 : ndeletable * sizeof(OffsetNumber));
816 pg 1358 ECB :
816 pg 1359 GIC 5202 : if (nupdatable > 0)
1360 : {
816 pg 1361 CBC 570 : XLogRegisterBufData(0, (char *) updatedoffsets,
816 pg 1362 ECB : nupdatable * sizeof(OffsetNumber));
816 pg 1363 GIC 570 : XLogRegisterBufData(0, updatedbuf, updatedbuflen);
816 pg 1364 ECB : }
4760 simon 1365 :
3062 heikki.linnakangas 1366 CBC 5202 : recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE);
1367 :
8222 vadim4o 1368 GIC 5202 : PageSetLSN(page, recptr);
1369 : }
1370 :
7938 tgl 1371 5226 : END_CRIT_SECTION();
1372 :
1373 : /* can't leak memory here */
816 pg 1374 5226 : if (updatedbuf != NULL)
1375 570 : pfree(updatedbuf);
1376 : /* free tuples allocated within _bt_delitems_update() */
1377 12179 : for (int i = 0; i < nupdatable; i++)
1378 6953 : pfree(updatable[i]->itup);
9770 scrappy 1379 5226 : }
1380 :
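/*
 * A condensed, hedged restatement of the critical-section/WAL protocol that
 * _bt_delitems_delete (above) follows.  The helper name and argument list
 * here are hypothetical; every call inside it appears verbatim in the real
 * functions in this file.  This is a sketch of the ordering rules only, not
 * a replacement for any of them.
 */
static void
sketch_modify_and_log_page(Relation rel, Buffer buf,
						   char *mainrdata, int mainrdatalen, uint8 info)
{
	/* No ereport(ERROR) once page changes begin: errors now become PANICs */
	START_CRIT_SECTION();

	/* ...apply all modifications to the page in buf here... */

	/* Dirty the buffer before logging, never after */
	MarkBufferDirty(buf);

	/* WAL-log the change, then stamp the page with the record's LSN */
	if (RelationNeedsWAL(rel))
	{
		XLogRecPtr	recptr;

		XLogBeginInsert();
		XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
		XLogRegisterData(mainrdata, mainrdatalen);
		recptr = XLogInsert(RM_BTREE_ID, info);
		PageSetLSN(BufferGetPage(buf), recptr);
	}

	/* Changes are logged; normal error handling may resume */
	END_CRIT_SECTION();
}
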
1381 : /*
1382 : * Set up state needed to delete TIDs from posting list tuples via "updating"
1383 : * the tuple. Performs steps common to both _bt_delitems_vacuum and
1384 : * _bt_delitems_delete. These steps must take place before each function's
1385 : * critical section begins.
1386 : *
1387 : * updatable and nupdatable are inputs, though note that we will use
1388 : * _bt_update_posting() to replace the original itup with a pointer to a final
816 pg 1389 ECB : * version in palloc()'d memory. Caller should free the tuples when it's done.
1390 : *
1391 : * The first nupdatable entries from updatedoffsets are set to the page offset
1392 : * number for posting list tuples that caller updates. This is mostly useful
1393 : * because caller may need to WAL-log the page offsets (though we always do
1394 : * this for caller out of convenience).
1395 : *
1396 : * Returns a buffer consisting of an array of xl_btree_update structs that
1397 : * describe the steps we perform here for caller (though only when needswal is
1398 : * true). Also sets *updatedbuflen to the final size of the buffer. This
1399 : * buffer is used by caller when WAL logging is required.
1400 : */
1401 : static char *
816 pg 1402 GIC 3445 : _bt_delitems_update(BTVacuumPosting *updatable, int nupdatable,
1403 : OffsetNumber *updatedoffsets, Size *updatedbuflen,
1404 : bool needswal)
1138 pg 1405 ECB : {
816 pg 1406 GIC 3445 : char *updatedbuf = NULL;
1407 3445 : Size buflen = 0;
816 pg 1408 ECB :
1409 : /* Shouldn't be called unless there's something to do */
816 pg 1410 GIC 3445 : Assert(nupdatable > 0);
1411 :
816 pg 1412 CBC 84557 : for (int i = 0; i < nupdatable; i++)
1413 : {
816 pg 1414 GIC 81112 : BTVacuumPosting vacposting = updatable[i];
1415 : Size itemsz;
1138 pg 1416 ECB :
1417 : /* Replace work area IndexTuple with updated version */
816 pg 1418 CBC 81112 : _bt_update_posting(vacposting);
1419 :
1420 : /* Keep track of size of xl_btree_update for updatedbuf in passing */
1421 81112 : itemsz = SizeOfBtreeUpdate + vacposting->ndeletedtids * sizeof(uint16);
1422 81112 : buflen += itemsz;
1138 pg 1423 ECB :
1424 : /* Build updatedoffsets buffer in passing */
816 pg 1425 CBC 81112 : updatedoffsets[i] = vacposting->updatedoffset;
1426 : }
1427 :
1428 : /* XLOG stuff */
1429 3445 : if (needswal)
816 pg 1430 ECB : {
816 pg 1431 GIC 3445 : Size offset = 0;
816 pg 1432 ECB :
1433 : /* Allocate, set final size for caller */
816 pg 1434 CBC 3445 : updatedbuf = palloc(buflen);
1435 3445 : *updatedbuflen = buflen;
1436 84557 : for (int i = 0; i < nupdatable; i++)
1437 : {
816 pg 1438 GIC 81112 : BTVacuumPosting vacposting = updatable[i];
1439 : Size itemsz;
816 pg 1440 ECB : xl_btree_update update;
1441 :
816 pg 1442 GIC 81112 : update.ndeletedtids = vacposting->ndeletedtids;
1443 81112 : memcpy(updatedbuf + offset, &update.ndeletedtids,
1444 : SizeOfBtreeUpdate);
1445 81112 : offset += SizeOfBtreeUpdate;
1446 :
1447 81112 : itemsz = update.ndeletedtids * sizeof(uint16);
816 pg 1448 CBC 81112 : memcpy(updatedbuf + offset, vacposting->deletetids, itemsz);
816 pg 1449 GIC 81112 : offset += itemsz;
1138 pg 1450 ECB : }
816 1451 : }
1452 :
816 pg 1453 CBC 3445 : return updatedbuf;
816 pg 1454 ECB : }
1455 :
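/*
 * A self-contained, hedged analogue (plain C, not backend code) of the
 * updatedbuf layout that _bt_delitems_update builds above: a packed stream
 * of records, each a uint16 count followed by that many uint16 posting-list
 * offsets, matching the "SizeOfBtreeUpdate + ndeletedtids * sizeof(uint16)"
 * arithmetic.  All names below are hypothetical.
 */
#include <stdint.h>
#include <string.h>

/* Append one xl_btree_update-style record; returns bytes written */
static size_t
demo_pack_update(char *dst, uint16_t ndeletedtids, const uint16_t *deletetids)
{
	size_t		offset = 0;

	memcpy(dst + offset, &ndeletedtids, sizeof(uint16_t));
	offset += sizeof(uint16_t);
	memcpy(dst + offset, deletetids, ndeletedtids * sizeof(uint16_t));
	offset += ndeletedtids * sizeof(uint16_t);

	return offset;				/* plays the role of "itemsz" above */
}

/* Walk a packed stream record by record, as a redo-side consumer might */
static void
demo_walk_stream(const char *buf, size_t buflen)
{
	size_t		offset = 0;

	while (offset < buflen)
	{
		uint16_t	ndeletedtids;

		memcpy(&ndeletedtids, buf + offset, sizeof(uint16_t));
		offset += sizeof(uint16_t);

		/* the next ndeletedtids uint16 values are posting-list offsets */
		offset += ndeletedtids * sizeof(uint16_t);
	}
}
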
1456 : /*
1457 : * Comparator used by _bt_delitems_delete_check() to restore deltids array
816 pg 1458 EUB : * back to its original leaf-page-wise sort order
1459 : */
1460 : static int
816 pg 1461 GIC 2377018 : _bt_delitems_cmp(const void *a, const void *b)
1462 : {
1463 2377018 : TM_IndexDelete *indexdelete1 = (TM_IndexDelete *) a;
1464 2377018 : TM_IndexDelete *indexdelete2 = (TM_IndexDelete *) b;
1465 :
1466 2377018 : if (indexdelete1->id > indexdelete2->id)
1467 1149452 : return 1;
1468 1227566 : if (indexdelete1->id < indexdelete2->id)
1469 1227566 : return -1;
1470 :
816 pg 1471 UIC 0 : Assert(false);
1472 :
1473 : return 0;
1474 : }
1475 :
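/*
 * A self-contained, hedged illustration (hypothetical names) of the
 * technique behind _bt_delitems_cmp above: stamp each entry with its
 * position in the original order before handing the array to code that
 * sorts it for its own purposes, then qsort() on that id afterwards to
 * restore the original leaf-page-wise order.
 */
#include <stdlib.h>

typedef struct DemoDelete
{
	int			id;				/* position in the original order */
	unsigned	tableblock;		/* sort key some other pass prefers */
} DemoDelete;

static int
demo_cmp_id(const void *a, const void *b)
{
	const DemoDelete *d1 = (const DemoDelete *) a;
	const DemoDelete *d2 = (const DemoDelete *) b;

	/* ids are unique, so ties are unreachable (hence Assert(false) above) */
	return (d1->id > d2->id) - (d1->id < d2->id);
}

/*
 * Usage: set entries[i].id = i while building the array in the preferred
 * order; after any reordering, qsort(entries, n, sizeof(DemoDelete),
 * demo_cmp_id) puts the array back exactly as built, which is what
 * _bt_delitems_delete_check does with deltids below.
 */
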
1476 : /*
1477 : * Try to delete item(s) from a btree leaf page during single-page cleanup.
1478 : *
1479 : * nbtree interface to table_index_delete_tuples(). Deletes a subset of index
1480 : * tuples from caller's deltids array: those whose TIDs are found safe to
1481 : * delete by the tableam (or already marked LP_DEAD in index, and so already
1482 : * known to be deletable by our simple index deletion caller). We physically
1483 : * delete index tuples from buf leaf page last of all (for index tuples where
1484 : * that is known to be safe following our table_index_delete_tuples() call).
1485 : *
1486 : * Simple index deletion caller only includes TIDs from index tuples marked
1487 : * LP_DEAD, as well as extra TIDs it found on the same leaf page that can be
1488 : * included without increasing the total number of distinct table blocks for
1489 : * the deletion operation as a whole. This approach often allows us to delete
1490 : * some extra index tuples that were practically free for tableam to check in
1491 : * passing (when they actually turn out to be safe to delete). It probably
1492 : * only makes sense for the tableam to go ahead with these extra checks when
1493 : * it is block-oriented (otherwise the checks probably won't be practically
1494 : * free, which we rely on). The tableam interface requires the tableam side
1495 : * to handle the problem, though, so this is okay (we as an index AM are free
1496 : * to make the simplifying assumption that all tableams must be block-based).
1497 : *
1498 : * Bottom-up index deletion caller provides all the TIDs from the leaf page,
1499 : * without expecting that tableam will check most of them. The tableam has
1500 : * considerable discretion around which entries/blocks it checks. Our role in
1501 : * costing the bottom-up deletion operation is strictly advisory.
816 pg 1502 ECB : *
1503 : * Note: Caller must have added deltids entries (i.e. entries that go in
1504 : * delstate's main array) in leaf-page-wise order: page offset number order,
1505 : * TID order among entries taken from the same posting list tuple (tiebreak on
1506 : * TID). This order is convenient to work with here.
1507 : *
1508 : * Note: We also rely on the id field of each deltids element "capturing" this
1509 : * original leaf-page-wise order. That is, we expect to be able to get back
1510 : * to the original leaf-page-wise order just by sorting deltids on the id
1511 : * field (tableam will sort deltids for its own reasons, so we'll need to put
1512 : * it back in leaf-page-wise order afterwards).
1513 : */
1514 : void
816 pg 1515 GIC 8281 : _bt_delitems_delete_check(Relation rel, Buffer buf, Relation heapRel,
1516 : TM_IndexDeleteOp *delstate)
816 pg 1517 ECB : {
816 pg 1518 CBC 8281 : Page page = BufferGetPage(buf);
1519 : TransactionId snapshotConflictHorizon;
816 pg 1520 GIC 8281 : OffsetNumber postingidxoffnum = InvalidOffsetNumber;
1521 8281 : int ndeletable = 0,
1522 8281 : nupdatable = 0;
1523 : OffsetNumber deletable[MaxIndexTuplesPerPage];
1524 : BTVacuumPosting updatable[MaxIndexTuplesPerPage];
1525 :
1526 : /* Use tableam interface to determine which tuples to delete first */
143 pg 1527 GNC 8281 : snapshotConflictHorizon = table_index_delete_tuples(heapRel, delstate);
1528 :
1529 : /* Should not WAL-log snapshotConflictHorizon unless it's required */
1530 8281 : if (!XLogStandbyInfoActive())
1531 943 : snapshotConflictHorizon = InvalidTransactionId;
1532 :
816 pg 1533 ECB : /*
1534 : * Construct a leaf-page-wise description of what _bt_delitems_delete()
1535 : * needs to do to physically delete index tuples from the page.
1536 : *
1537 : * Must sort deltids array to restore leaf-page-wise order (original order
1538 : * before call to tableam). This is the order that the loop expects.
1539 : *
1540 : * Note that deltids array might be a lot smaller now. It might even have
1541 : * no entries at all (with bottom-up deletion caller), in which case there
1542 : * is nothing left to do.
1543 : */
816 pg 1544 CBC 8281 : qsort(delstate->deltids, delstate->ndeltids, sizeof(TM_IndexDelete),
816 pg 1545 ECB : _bt_delitems_cmp);
816 pg 1546 GIC 8281 : if (delstate->ndeltids == 0)
1547 : {
1548 3055 : Assert(delstate->bottomup);
1549 3055 : return;
816 pg 1550 ECB : }
1551 :
1552 : /* We definitely have to delete at least one index tuple (or one TID) */
816 pg 1553 GIC 359370 : for (int i = 0; i < delstate->ndeltids; i++)
1554 : {
1555 354144 : TM_IndexStatus *dstatus = delstate->status + delstate->deltids[i].id;
1556 354144 : OffsetNumber idxoffnum = dstatus->idxoffnum;
1557 354144 : ItemId itemid = PageGetItemId(page, idxoffnum);
816 pg 1558 CBC 354144 : IndexTuple itup = (IndexTuple) PageGetItem(page, itemid);
816 pg 1559 ECB : int nestedi,
1560 : nitem;
1561 : BTVacuumPosting vacposting;
1562 :
816 pg 1563 CBC 354144 : Assert(OffsetNumberIsValid(idxoffnum));
1564 :
816 pg 1565 GIC 354144 : if (idxoffnum == postingidxoffnum)
816 pg 1566 ECB : {
1567 : /*
1568 : * This deltid entry is a TID from a posting list tuple that has
1569 : * already been completely processed
1570 : */
816 pg 1571 CBC 13222 : Assert(BTreeTupleIsPosting(itup));
1572 13222 : Assert(ItemPointerCompare(BTreeTupleGetHeapTID(itup),
1573 : &delstate->deltids[i].tid) < 0);
816 pg 1574 GIC 13222 : Assert(ItemPointerCompare(BTreeTupleGetMaxHeapTID(itup),
1575 : &delstate->deltids[i].tid) >= 0);
1576 13222 : continue;
1577 : }
1578 :
1579 340922 : if (!BTreeTupleIsPosting(itup))
1580 : {
1581 : /* Plain non-pivot tuple */
1582 327790 : Assert(ItemPointerEquals(&itup->t_tid, &delstate->deltids[i].tid));
816 pg 1583 CBC 327790 : if (dstatus->knowndeletable)
1584 240804 : deletable[ndeletable++] = idxoffnum;
1585 327790 : continue;
816 pg 1586 ECB : }
1587 :
1588 : /*
1589 : * itup is a posting list tuple whose lowest deltids entry (which may
1590 : * or may not be for the first TID from itup) is considered here now.
1591 : * We should process all of the deltids entries for the posting list
1592 : * together now, though (not just the lowest). Remember to skip over
1593 : * later itup-related entries during later iterations of the outermost
1594 : * loop.
1595 : */
816 pg 1596 GIC 13132 : postingidxoffnum = idxoffnum; /* Remember work in outermost loop */
1597 13132 : nestedi = i; /* Initialize for first itup deltids entry */
816 pg 1598 CBC 13132 : vacposting = NULL; /* Describes final action for itup */
816 pg 1599 GIC 13132 : nitem = BTreeTupleGetNPosting(itup);
816 pg 1600 CBC 74621 : for (int p = 0; p < nitem; p++)
1138 pg 1601 ECB : {
816 pg 1602 GIC 61489 : ItemPointer ptid = BTreeTupleGetPostingN(itup, p);
1603 61489 : int ptidcmp = -1;
1138 pg 1604 ECB :
816 1605 : /*
1606 : * This nested loop reuses work across ptid TIDs taken from itup.
1607 : * We take advantage of the fact that itup's TIDs and deltids
1608 : * entries (within a single itup/posting list grouping) must both
1609 : * be in ascending TID order.
1610 : */
816 pg 1611 GIC 82411 : for (; nestedi < delstate->ndeltids; nestedi++)
1612 : {
816 pg 1613 CBC 68116 : TM_IndexDelete *tcdeltid = &delstate->deltids[nestedi];
1614 68116 : TM_IndexStatus *tdstatus = (delstate->status + tcdeltid->id);
1615 :
1616 : /* Stop once we get past all itup related deltids entries */
1617 68116 : Assert(tdstatus->idxoffnum >= idxoffnum);
816 pg 1618 GIC 68116 : if (tdstatus->idxoffnum != idxoffnum)
1619 18387 : break;
1620 :
1621 : /* Skip past non-deletable itup related entries up front */
816 pg 1622 CBC 49729 : if (!tdstatus->knowndeletable)
1623 5190 : continue;
1624 :
1625 : /* Entry is first partial ptid match (or an exact match)? */
1626 44539 : ptidcmp = ItemPointerCompare(&tcdeltid->tid, ptid);
816 pg 1627 GIC 44539 : if (ptidcmp >= 0)
816 pg 1628 ECB : {
1629 : /* Greater than or equal (partial or exact) match... */
816 pg 1630 CBC 28807 : break;
816 pg 1631 ECB : }
1138 1632 : }
1633 :
816 1634 : /* ...exact ptid match to a deletable deltids entry? */
816 pg 1635 GIC 61489 : if (ptidcmp != 0)
1636 40325 : continue;
1637 :
1638 : /* Exact match for deletable deltids entry -- ptid gets deleted */
816 pg 1639 CBC 21164 : if (vacposting == NULL)
1640 : {
816 pg 1641 GIC 11994 : vacposting = palloc(offsetof(BTVacuumPostingData, deletetids) +
1642 : nitem * sizeof(uint16));
816 pg 1643 CBC 11994 : vacposting->itup = itup;
816 pg 1644 GIC 11994 : vacposting->updatedoffset = idxoffnum;
1645 11994 : vacposting->ndeletedtids = 0;
1138 pg 1646 ECB : }
816 pg 1647 GIC 21164 : vacposting->deletetids[vacposting->ndeletedtids++] = p;
1138 pg 1648 ECB : }
1649 :
1650 : /* Final decision on itup, a posting list tuple */
1651 :
816 pg 1652 GIC 13132 : if (vacposting == NULL)
816 pg 1653 ECB : {
1654 : /* No TIDs to delete from itup -- do nothing */
1655 : }
816 pg 1656 GIC 11994 : else if (vacposting->ndeletedtids == nitem)
1657 : {
1658 : /* Straight delete of itup (to delete all TIDs) */
1659 5041 : deletable[ndeletable++] = idxoffnum;
816 pg 1660 ECB : /* Turns out we won't need granular information */
816 pg 1661 GIC 5041 : pfree(vacposting);
1662 : }
1663 : else
816 pg 1664 ECB : {
1665 : /* Delete some (but not all) TIDs from itup */
816 pg 1666 GIC 6953 : Assert(vacposting->ndeletedtids > 0 &&
1667 : vacposting->ndeletedtids < nitem);
1668 6953 : updatable[nupdatable++] = vacposting;
1669 : }
1670 : }
1671 :
1672 : /* Physically delete tuples (or TIDs) using deletable (or updatable) */
8 andres 1673 GNC 5226 : _bt_delitems_delete(rel, heapRel, buf, snapshotConflictHorizon, deletable,
1674 : ndeletable, updatable, nupdatable);
1675 :
1676 : /* be tidy */
816 pg 1677 GIC 12179 : for (int i = 0; i < nupdatable; i++)
1678 6953 : pfree(updatable[i]);
1679 : }
1680 :
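/*
 * The nested loop over each posting list above is a linear merge of two
 * ascending sequences.  A self-contained, hedged sketch of the same idea,
 * with plain ints standing in for heap TIDs (the real code compares
 * ItemPointers and also filters on knowndeletable):
 */
static int
demo_posting_intersect(const int *postingtids, int nitem,
					   const int *deltids, int ndeltids,
					   int *deletepositions)
{
	int			ndeleted = 0;
	int			nestedi = 0;	/* advances monotonically across p values */

	for (int p = 0; p < nitem; p++)
	{
		/* skip deltids entries that sort before posting-list TID p */
		while (nestedi < ndeltids && deltids[nestedi] < postingtids[p])
			nestedi++;

		/* exact match: the TID at posting-list position p is deletable */
		if (nestedi < ndeltids && deltids[nestedi] == postingtids[p])
			deletepositions[ndeleted++] = p;
	}

	return ndeleted;			/* the role of vacposting->ndeletedtids above */
}
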
1681 : /*
1067 pg 1682 ECB : * Check that leftsib page (the btpo_prev of target page) is not marked with
1683 : * INCOMPLETE_SPLIT flag. Used during page deletion.
1684 : *
1685 : * Returning true indicates that page flag is set in leftsib (which is
1686 : * definitely still the left sibling of target). When that happens, the
1687 : * target doesn't have a downlink in parent, and the page deletion algorithm
1688 : * isn't prepared to handle that. Deletion of the target page (or the whole
1689 : * subtree that contains the target page) cannot take place.
1690 : *
1063 1691 : * Caller should not have a lock on the target page itself, since pages on the
1692 : * same level must always be locked left to right to avoid deadlocks.
1693 : */
1067 1694 : static bool
8 andres 1695 GNC 2757 : _bt_leftsib_splitflag(Relation rel, Relation heaprel, BlockNumber leftsib,
1696 : BlockNumber target)
1067 pg 1697 ECB : {
1698 : Buffer buf;
1699 : Page page;
1700 : BTPageOpaque opaque;
1701 : bool result;
1702 :
1703 : /* Easy case: No left sibling */
1067 pg 1704 GIC 2757 : if (leftsib == P_NONE)
1705 2261 : return false;
1706 :
8 andres 1707 GNC 496 : buf = _bt_getbuf(rel, heaprel, leftsib, BT_READ);
1067 pg 1708 CBC 496 : page = BufferGetPage(buf);
373 michael 1709 GIC 496 : opaque = BTPageGetOpaque(page);
1067 pg 1710 ECB :
1711 : /*
1712 : * If the left sibling was concurrently split, so that its next-pointer
1713 : * doesn't point to the current page anymore, the split that created
1714 : * target must be completed. Caller can reasonably expect that there will
1715 : * be a downlink to the target page that it can relocate using its stack.
1716 : * (We don't allow splitting an incompletely split page again until the
1717 : * previous split has been completed.)
1718 : */
1067 pg 1719 GIC 496 : result = (opaque->btpo_next == target && P_INCOMPLETE_SPLIT(opaque));
1720 496 : _bt_relbuf(rel, buf);
1721 :
1722 496 : return result;
1723 : }
1724 :
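/*
 * The left-to-right rule discussed below is the classic deadlock-avoidance
 * technique of imposing one global lock acquisition order.  A minimal,
 * hedged sketch in generic terms (POSIX threads, hypothetical names; nbtree
 * itself uses buffer locks ordered by same-level page position, not pthread
 * mutexes ordered by block number):
 */
#include <pthread.h>

static void
demo_lock_two_pages(pthread_mutex_t *locks, int blkno1, int blkno2)
{
	/* every caller locks the lower "block number" first */
	int			first = blkno1 < blkno2 ? blkno1 : blkno2;
	int			second = blkno1 < blkno2 ? blkno2 : blkno1;

	pthread_mutex_lock(&locks[first]);
	pthread_mutex_lock(&locks[second]);
	/* with a single global order, no cycle of waiters can ever form */
}
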
1725 : /*
1726 : * Check that leafrightsib page (the btpo_next of target leaf page) is not
1727 : * marked with ISHALFDEAD flag. Used during page deletion.
1728 : *
1729 : * Returning true indicates that page flag is set in leafrightsib, so page
1730 : * deletion cannot go ahead. Our caller is not prepared to deal with the case
1731 : * where the parent page does not have a pivot tuple whose downlink points to
1732 : * leafrightsib (due to an earlier interrupted VACUUM operation). It doesn't
1733 : * seem worth going to the trouble of teaching our caller to deal with it.
1734 : * The situation will be resolved after VACUUM finishes the deletion of the
1735 : * half-dead page (when a future VACUUM operation reaches the target page
1736 : * again).
1737 : *
1738 : * _bt_leftsib_splitflag() is called for both leaf pages and internal pages.
1739 : * _bt_rightsib_halfdeadflag() is only called for leaf pages, though. This is
1740 : * okay because of the restriction on deleting pages that are the rightmost
1067 pg 1741 ECB : * page of their parent (i.e. that such deletions can only take place when the
1742 : * entire subtree must be deleted). The leaf level check made here will apply
1743 : * to a right "cousin" leaf page rather than a simple right sibling leaf page
1744 : * in cases where caller actually goes on to attempt deleting pages that are
1745 : * above the leaf page. The right cousin leaf page is representative of the
1746 : * left edge of the subtree to the right of the to-be-deleted subtree as a
1747 : * whole, which is exactly the condition that our caller cares about.
1748 : * (Besides, internal pages are never marked half-dead, so it isn't even
1749 : * possible to _directly_ assess if an internal page is part of some other
1750 : * to-be-deleted subtree.)
3241 heikki.linnakangas 1751 : */
1752 : static bool
8 andres 1753 GNC 2670 : _bt_rightsib_halfdeadflag(Relation rel, Relation heaprel, BlockNumber leafrightsib)
3241 heikki.linnakangas 1754 ECB : {
1755 : Buffer buf;
1756 : Page page;
1757 : BTPageOpaque opaque;
1758 : bool result;
1759 :
1067 pg 1760 GIC 2670 : Assert(leafrightsib != P_NONE);
1761 :
8 andres 1762 GNC 2670 : buf = _bt_getbuf(rel, heaprel, leafrightsib, BT_READ);
2545 kgrittn 1763 GIC 2670 : page = BufferGetPage(buf);
373 michael 1764 2670 : opaque = BTPageGetOpaque(page);
1765 :
1067 pg 1766 2670 : Assert(P_ISLEAF(opaque) && !P_ISDELETED(opaque));
3241 heikki.linnakangas 1767 2670 : result = P_ISHALFDEAD(opaque);
1768 2670 : _bt_relbuf(rel, buf);
1769 :
1770 2670 : return result;
1771 : }
1772 :
1773 : /*
1774 : * _bt_pagedel() -- Delete a leaf page from the b-tree, if legal to do so.
1775 : *
1776 : * This action unlinks the leaf page from the b-tree structure, removing all
1777 : * pointers leading to it --- but not touching its own left and right links.
1778 : * The page cannot be physically reclaimed right away, since other processes
1779 : * may currently be trying to follow links leading to the page; they have to
1780 : * be allowed to use its right-link to recover. See nbtree/README.
1781 : *
1782 : * On entry, the target buffer must be pinned and locked (either read or write
1783 : * lock is OK). The page must be an empty leaf page, which may be half-dead
1784 : * already (a half-dead page should only be passed to us when an earlier
1785 : * VACUUM operation was interrupted, though). Note in particular that caller
1786 : * should never pass a buffer containing an existing deleted page here. The
1787 : * lock and pin on caller's buffer will be dropped before we return.
1788 : *
1789 : * Maintains bulk delete stats for caller, which are taken from vstate. We
1790 : * need to cooperate closely with caller here so that whole VACUUM operation
773 pg 1791 ECB : * reliably avoids any double counting of subsidiary-to-leafbuf pages that we
1792 : * delete in passing. If such pages happen to be from a block number that is
1793 : * ahead of the current scanblkno position, then caller is expected to count
1794 : * them directly later on. It's simpler for us to understand caller's
1795 : * requirements than it would be for caller to understand when or how a
1796 : * deleted page became deleted after the fact.
1797 : *
1798 : * NOTE: this leaks memory. Rather than trying to clean up everything
1799 : * carefully, it's better to run it in a temp context that can be reset
1800 : * frequently.
1801 : */
1802 : void
773 pg 1803 CBC 2730 : _bt_pagedel(Relation rel, Buffer leafbuf, BTVacState *vstate)
1804 : {
1805 : BlockNumber rightsib;
1806 : bool rightsib_empty;
1807 : Page page;
1808 : BTPageOpaque opaque;
1809 :
1810 : /*
1811 : * Save original leafbuf block number from caller. Only deleted blocks
1812 : * that are <= scanblkno are added to bulk delete stat's pages_deleted
1813 : * count.
1814 : */
1073 pg 1815 GIC 2730 : BlockNumber scanblkno = BufferGetBlockNumber(leafbuf);
1073 pg 1816 ECB :
1817 : /*
1818 : * "stack" is a search stack leading (approximately) to the target page.
1819 : * It is initially NULL, but when iterating, we keep it to avoid
3313 heikki.linnakangas 1820 : * duplicated search effort.
3309 1821 : *
1822 : * Also, when "stack" is not NULL, we have already checked that the
1823 : * current page is not the right half of an incomplete split, i.e. the
1824 : * left sibling does not have its INCOMPLETE_SPLIT flag set, including
1825 : * when the current target page is to the right of caller's initial page
1826 : * (the scanblkno page).
1827 : */
3313 heikki.linnakangas 1828 GIC 2730 : BTStack stack = NULL;
1829 :
1830 : for (;;)
1831 : {
1073 pg 1832 5400 : page = BufferGetPage(leafbuf);
373 michael 1833 CBC 5400 : opaque = BTPageGetOpaque(page);
6003 tgl 1834 ECB :
1835 : /*
1836 : * Internal pages are never deleted directly, only as part of deleting
1837 : * the whole subtree all the way down to leaf level.
1838 : *
1839 : * Also check for deleted pages here. Caller never passes us a fully
1840 : * deleted page. Only VACUUM can delete pages, so there can't have
1841 : * been a concurrent deletion. Assume that we reached any deleted
1842 : * page encountered here by following a sibling link, and that the
1843 : * index is corrupt.
1844 : */
1073 pg 1845 GIC 5400 : Assert(!P_ISDELETED(opaque));
1846 5400 : if (!P_ISLEAF(opaque) || P_ISDELETED(opaque))
3313 heikki.linnakangas 1847 EUB : {
1848 : /*
1849 : * Pre-9.4 page deletion only marked internal pages as half-dead,
1850 : * but now we only use that flag on leaf pages. The old algorithm
1851 : * was never supposed to leave half-dead pages in the tree; it was
1852 : * just a transient state, but it was nevertheless possible in
1853 : * error scenarios. We don't know how to deal with them here. They
3260 bruce 1854 : * are harmless as far as searches are concerned, but inserts
1855 : * into the deleted keyspace could add out-of-order downlinks in
1856 : * the upper levels. Log a notice; hopefully the admin will notice
1857 : * and reindex.
1858 : */
3313 heikki.linnakangas 1859 UIC 0 : if (P_ISHALFDEAD(opaque))
1860 0 : ereport(LOG,
1861 : (errcode(ERRCODE_INDEX_CORRUPTED),
2118 tgl 1862 EUB : errmsg("index \"%s\" contains a half-dead internal page",
2118 tgl 1863 ECB : RelationGetRelationName(rel)),
1864 : errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
1865 :
1073 pg 1866 UIC 0 : if (P_ISDELETED(opaque))
1867 0 : ereport(LOG,
1868 : (errcode(ERRCODE_INDEX_CORRUPTED),
1869 : errmsg_internal("found deleted block %u while following right link from block %u in index \"%s\"",
1870 : BufferGetBlockNumber(leafbuf),
1871 : scanblkno,
1872 : RelationGetRelationName(rel))));
1873 :
1874 0 : _bt_relbuf(rel, leafbuf);
773 pg 1875 GIC 62 : return;
1876 : }
1877 :
1878 : /*
1879 : * We can never delete rightmost pages nor root pages. While at it,
1880 : * check that page is empty, since it's possible that the leafbuf page
1881 : * was empty a moment ago, but has since had some inserts.
1882 : *
1883 : * To keep the algorithm simple, we also never delete an incompletely
1884 : * split page (they should be rare enough that this doesn't make any
3309 heikki.linnakangas 1885 ECB : * meaningful difference to disk usage):
1886 : *
1887 : * The INCOMPLETE_SPLIT flag on the page tells us if the page is the
1888 : * left half of an incomplete split, but ensuring that it's not the
1889 : * right half is more complicated. For that, we have to check that
1067 pg 1890 : * the left sibling doesn't have its INCOMPLETE_SPLIT flag set using
1891 : * _bt_leftsib_splitflag(). On the first iteration, we temporarily
1892 : * release the lock on scanblkno/leafbuf, check the left sibling, and
1893 : * construct a search stack to scanblkno. On subsequent iterations,
1894 : * we know we stepped right from a page that passed these tests, so
1895 : * it's OK.
1896 : */
1073 pg 1897 GIC 5400 : if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) ||
3309 heikki.linnakangas 1898 5339 : P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
1899 5339 : P_INCOMPLETE_SPLIT(opaque))
1900 : {
3313 heikki.linnakangas 1901 ECB : /* Should never fail to delete a half-dead page */
3313 heikki.linnakangas 1902 GIC 61 : Assert(!P_ISHALFDEAD(opaque));
1903 :
1073 pg 1904 61 : _bt_relbuf(rel, leafbuf);
773 1905 61 : return;
1906 : }
1907 :
1908 : /*
1909 : * First, remove downlink pointing to the page (or a parent of the
1910 : * page, if we are going to delete a taller subtree), and mark the
1911 : * leafbuf page half-dead
1912 : */
3313 heikki.linnakangas 1913 CBC 5339 : if (!P_ISHALFDEAD(opaque))
6003 tgl 1914 ECB : {
1915 : /*
1916 : * We need an approximate pointer to the page's parent page. We
1917 : * use a variant of the standard search mechanism to search for
1918 : * the page's high key; this will give us a link to either the
1919 : * current parent or someplace to its left (if there are multiple
1920 : * equal high keys, which is possible with !heapkeyspace indexes).
1921 : *
3309 heikki.linnakangas 1922 : * Also check if this is the right-half of an incomplete split
1923 : * (see comment above).
1924 : */
3313 heikki.linnakangas 1925 CBC 5339 : if (!stack)
1926 2669 : {
1927 : BTScanInsert itup_key;
1928 : ItemId itemid;
1929 : IndexTuple targetkey;
1930 : BlockNumber leftsib,
1931 : leafblkno;
1063 pg 1932 ECB : Buffer sleafbuf;
1933 :
3313 heikki.linnakangas 1934 GIC 2669 : itemid = PageGetItemId(page, P_HIKEY);
1935 2669 : targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
1936 :
3309 1937 2669 : leftsib = opaque->btpo_prev;
1063 pg 1938 CBC 2669 : leafblkno = BufferGetBlockNumber(leafbuf);
3309 heikki.linnakangas 1939 ECB :
1940 : /*
3313 heikki.linnakangas 1941 EUB : * To avoid deadlocks, we'd better drop the leaf page lock
1942 : * before going further.
1943 : */
992 pg 1944 GIC 2669 : _bt_unlockbuf(rel, leafbuf);
1945 :
3309 heikki.linnakangas 1946 ECB : /*
1947 : * Check that the left sibling of leafbuf (if any) is not
1067 pg 1948 : * marked with INCOMPLETE_SPLIT flag before proceeding
3309 heikki.linnakangas 1949 : */
1063 pg 1950 GIC 2669 : Assert(leafblkno == scanblkno);
8 andres 1951 GNC 2669 : if (_bt_leftsib_splitflag(rel, vstate->info->heaprel, leftsib, leafblkno))
3309 heikki.linnakangas 1952 ECB : {
1067 pg 1953 UIC 0 : ReleaseBuffer(leafbuf);
773 1954 0 : return;
1955 : }
1956 :
1957 : /* we need an insertion scan key for the search, so build one */
8 andres 1958 GNC 2669 : itup_key = _bt_mkscankey(rel, vstate->info->heaprel, targetkey);
1959 : /* find the leftmost leaf page with matching pivot/high key */
1481 pg 1960 GIC 2669 : itup_key->pivotsearch = true;
8 andres 1961 GNC 2669 : stack = _bt_search(rel, vstate->info->heaprel, itup_key,
1962 : &sleafbuf, BT_READ, NULL);
1963 : /* won't need a second lock or pin on leafbuf */
1063 pg 1964 GIC 2669 : _bt_relbuf(rel, sleafbuf);
1965 :
1966 : /*
1967 : * Re-lock the leaf page, and start over to use our stack
1968 : * within _bt_mark_page_halfdead. We must do it that way
1969 : * because it's possible that leafbuf can no longer be
1073 pg 1970 ECB : * deleted. We need to recheck.
1055 1971 : *
1972 : * Note: We can't simply hold on to the sleafbuf lock instead,
1973 : * because it's barely possible that sleafbuf is not the same
1974 : * page as leafbuf. This happens when leafbuf split after our
1975 : * original lock was dropped, but before _bt_search finished
1976 : * its descent. We rely on the assumption that we'll find
1977 : * leafbuf isn't safe to delete anymore in this scenario.
1978 : * (Page deletion can cope with the stack being to the left of
1979 : * leafbuf, but not to the right of leafbuf.)
1980 : */
992 pg 1981 CBC 2669 : _bt_lockbuf(rel, leafbuf, BT_WRITE);
3313 heikki.linnakangas 1982 2669 : continue;
1983 : }
3313 heikki.linnakangas 1984 ECB :
1073 pg 1985 : /*
1986 : * See if it's safe to delete the leaf page, and determine how
1987 : * many parent/internal pages above the leaf level will be
1988 : * deleted. If it's safe then _bt_mark_page_halfdead will also
1989 : * perform the first phase of deletion, which includes marking the
1990 : * leafbuf page half-dead.
1991 : */
1073 pg 1992 GIC 2670 : Assert(P_ISLEAF(opaque) && !P_IGNORE(opaque));
8 andres 1993 GNC 2670 : if (!_bt_mark_page_halfdead(rel, vstate->info->heaprel, leafbuf, stack))
6003 tgl 1994 ECB : {
1073 pg 1995 CBC 1 : _bt_relbuf(rel, leafbuf);
773 1996 1 : return;
1997 : }
1998 : }
3313 heikki.linnakangas 1999 ECB :
2000 : /*
2001 : * Then unlink it from its siblings. Each call to
2002 : * _bt_unlink_halfdead_page unlinks the topmost page from the subtree,
2003 : * making it shallower. Iterate until the leafbuf page is deleted.
2004 : */
3313 heikki.linnakangas 2005 GIC 2669 : rightsib_empty = false;
1073 pg 2006 2669 : Assert(P_ISLEAF(opaque) && P_ISHALFDEAD(opaque));
3313 heikki.linnakangas 2007 5424 : while (P_ISHALFDEAD(opaque))
2008 : {
2009 : /* Check for interrupts in _bt_unlink_halfdead_page */
1073 pg 2010 GBC 2755 : if (!_bt_unlink_halfdead_page(rel, leafbuf, scanblkno,
2011 : &rightsib_empty, vstate))
2012 : {
2013 : /*
2014 : * _bt_unlink_halfdead_page should never fail, since we
794 pg 2015 ECB : * established that deletion is generally safe in
2016 : * _bt_mark_page_halfdead -- index must be corrupt.
2017 : *
2018 : * Note that _bt_unlink_halfdead_page already released the
2019 : * lock and pin on leafbuf for us.
2020 : */
794 pg 2021 UIC 0 : Assert(false);
2022 : return;
2023 : }
2024 : }
3313 heikki.linnakangas 2025 ECB :
1073 pg 2026 GIC 2669 : Assert(P_ISLEAF(opaque) && P_ISDELETED(opaque));
2027 :
3313 heikki.linnakangas 2028 2669 : rightsib = opaque->btpo_next;
2029 :
1073 pg 2030 2669 : _bt_relbuf(rel, leafbuf);
2031 :
2032 : /*
2033 : * Check here, as calling loops will have locks held, preventing
2034 : * interrupts from being processed.
2035 : */
1740 andres 2036 2669 : CHECK_FOR_INTERRUPTS();
2037 :
2038 : /*
2039 : * The page has now been deleted. If its right sibling is completely
2040 : * empty, it's possible that the reason we haven't deleted it earlier
2041 : * is that it was the rightmost child of the parent. Now that we
3313 heikki.linnakangas 2042 ECB : * removed the downlink for this page, the right sibling might now be
2043 : * the only child of the parent, and could be removed. It would be
2044 : * picked up by the next vacuum anyway, but might as well try to
3260 bruce 2045 : * remove it now, so loop back to process the right sibling.
2046 : *
2047 : * Note: This relies on the assumption that _bt_getstackbuf() will be
2048 : * able to reuse our original descent stack with a different child
2049 : * block (provided that the child block is to the right of the
2050 : * original leaf page reached by _bt_search()). It will even update
2051 : * the descent stack each time we loop around, avoiding repeated work.
2052 : */
3313 heikki.linnakangas 2053 GIC 2669 : if (!rightsib_empty)
2054 2668 : break;
2055 :
8 andres 2056 GNC 1 : leafbuf = _bt_getbuf(rel, vstate->info->heaprel, rightsib, BT_WRITE);
2057 : }
2058 : }
2059 :
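/*
 * A hedged sketch of how a caller might honor the "temp context" note in
 * _bt_pagedel's header comment above.  The wrapper and context names are
 * hypothetical; MemoryContextSwitchTo/MemoryContextReset are the standard
 * backend calls for reclaiming leaked allocations wholesale.
 */
static void
sketch_pagedel_in_temp_context(Relation rel, Buffer leafbuf,
							   BTVacState *vstate, MemoryContext tmpctx)
{
	MemoryContext oldcontext = MemoryContextSwitchTo(tmpctx);

	/* anything _bt_pagedel leaks is charged to tmpctx... */
	_bt_pagedel(rel, leafbuf, vstate);

	MemoryContextSwitchTo(oldcontext);
	MemoryContextReset(tmpctx); /* ...and is released here in one sweep */
}
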
2060 : /*
2061 : * First stage of page deletion.
2062 : *
2063 : * Establish the height of the to-be-deleted subtree with leafbuf at its
1063 pg 2064 ECB : * lowest level, remove the downlink to the subtree, and mark leafbuf
2065 : * half-dead. The final to-be-deleted subtree is usually just leafbuf itself,
2066 : * but may include additional internal pages (at most one per level of the
2067 : * tree below the root).
2068 : *
2069 : * Returns 'false' if leafbuf is unsafe to delete, usually because leafbuf is
2070 : * the rightmost child of its parent (and parent has more than one downlink).
2071 : * Returns 'true' when the first stage of page deletion completed
2072 : * successfully.
2073 : */
2074 : static bool
8 andres 2075 GNC 2670 : _bt_mark_page_halfdead(Relation rel, Relation heaprel, Buffer leafbuf,
2076 : BTStack stack)
2077 : {
2078 : BlockNumber leafblkno;
2079 : BlockNumber leafrightsib;
2080 : BlockNumber topparent;
1063 pg 2081 ECB : BlockNumber topparentrightsib;
3313 heikki.linnakangas 2082 : ItemId itemid;
2083 : Page page;
2084 : BTPageOpaque opaque;
2085 : Buffer subtreeparent;
2086 : OffsetNumber poffset;
2087 : OffsetNumber nextoffset;
2088 : IndexTuple itup;
2089 : IndexTupleData trunctuple;
2090 :
2545 kgrittn 2091 CBC 2670 : page = BufferGetPage(leafbuf);
373 michael 2092 2670 : opaque = BTPageGetOpaque(page);
2093 :
1063 pg 2094 GIC 2670 : Assert(!P_RIGHTMOST(opaque) && !P_ISROOT(opaque) &&
2095 : P_ISLEAF(opaque) && !P_IGNORE(opaque) &&
2096 : P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
2097 :
2098 : /*
2099 : * Save info about the leaf page.
2100 : */
3313 heikki.linnakangas 2101 CBC 2670 : leafblkno = BufferGetBlockNumber(leafbuf);
3313 heikki.linnakangas 2102 GIC 2670 : leafrightsib = opaque->btpo_next;
3313 heikki.linnakangas 2103 EUB :
2104 : /*
2878 bruce 2105 : * Before attempting to lock the parent page, check that the right sibling
2106 : * is not in half-dead state. A half-dead right sibling would have no
2107 : * downlink in the parent, which would be highly confusing later when we
2108 : * delete the downlink. It would fail the "right sibling of target page
2109 : * is also the next child in parent page" cross-check below.
2110 : */
8 andres 2111 GNC 2670 : if (_bt_rightsib_halfdeadflag(rel, heaprel, leafrightsib))
2112 : {
3241 heikki.linnakangas 2113 UIC 0 : elog(DEBUG1, "could not delete page %u because its right sibling %u is half-dead",
2114 : leafblkno, leafrightsib);
2115 0 : return false;
2116 : }
2117 :
2118 : /*
2119 : * We cannot delete a page that is the rightmost child of its immediate
2120 : * parent, unless it is the only child --- in which case the parent has to
2121 : * be deleted too, and the same condition applies recursively to it. We
2122 : * have to check this condition all the way up before trying to delete,
1063 pg 2123 ECB : * and lock the parent of the root of the to-be-deleted subtree (the
2124 : * "subtree parent"). _bt_lock_subtree_parent() locks the subtree parent
2125 : * for us. We remove the downlink to the "top parent" page (subtree root
2126 : * page) from the subtree parent page below.
2127 : *
2128 : * Initialize topparent to be leafbuf page now. The final to-be-deleted
2129 : * subtree is often a degenerate one page subtree consisting only of the
2130 : * leafbuf page. When that happens, the leafbuf page is the final subtree
2131 : * root page/top parent page.
2132 : */
1063 pg 2133 GIC 2670 : topparent = leafblkno;
2134 2670 : topparentrightsib = leafrightsib;
8 andres 2135 GNC 2670 : if (!_bt_lock_subtree_parent(rel, heaprel, leafblkno, stack,
1063 pg 2136 ECB : &subtreeparent, &poffset,
2137 : &topparent, &topparentrightsib))
3313 heikki.linnakangas 2138 GIC 1 : return false;
2139 :
2140 : /*
2141 : * Check that the parent-page index items we're about to delete/overwrite
2142 : * in subtree parent page contain what we expect. This can fail if the
2143 : * index has become corrupt for some reason. We want to throw any error
2144 : * before entering the critical section --- otherwise it'd be a PANIC.
3313 heikki.linnakangas 2145 ECB : */
1063 pg 2146 CBC 2669 : page = BufferGetPage(subtreeparent);
373 michael 2147 2669 : opaque = BTPageGetOpaque(page);
2148 :
2149 : #ifdef USE_ASSERT_CHECKING
1060 tgl 2150 ECB :
1063 pg 2151 : /*
2152 : * This is just an assertion because _bt_lock_subtree_parent should have
2153 : * guaranteed tuple has the expected contents
1063 pg 2154 EUB : */
1063 pg 2155 GIC 2669 : itemid = PageGetItemId(page, poffset);
3313 heikki.linnakangas 2156 2669 : itup = (IndexTuple) PageGetItem(page, itemid);
1063 pg 2157 2669 : Assert(BTreeTupleGetDownLink(itup) == topparent);
2158 : #endif
2159 :
2160 2669 : nextoffset = OffsetNumberNext(poffset);
3313 heikki.linnakangas 2161 2669 : itemid = PageGetItemId(page, nextoffset);
2162 2669 : itup = (IndexTuple) PageGetItem(page, itemid);
1063 pg 2163 2669 : if (BTreeTupleGetDownLink(itup) != topparentrightsib)
1347 peter 2164 UIC 0 : ereport(ERROR,
2165 : (errcode(ERRCODE_INDEX_CORRUPTED),
1063 pg 2166 ECB : errmsg_internal("right sibling %u of block %u is not next child %u of block %u in index \"%s\"",
2167 : topparentrightsib, topparent,
2168 : BTreeTupleGetDownLink(itup),
2169 : BufferGetBlockNumber(subtreeparent),
2170 : RelationGetRelationName(rel))));
2171 :
2172 : /*
2173 : * Any insert which would have gone on the leaf block will now go to its
2174 : * right sibling. In other words, the key space moves right.
2175 : */
3313 heikki.linnakangas 2176 GIC 2669 : PredicateLockPageCombine(rel, leafblkno, leafrightsib);
2177 :
2178 : /* No ereport(ERROR) until changes are logged */
2179 2669 : START_CRIT_SECTION();
2180 :
2181 : /*
2182 : * Update parent of subtree. We want to delete the downlink to the top
1063 pg 2183 ECB : * parent page/root of the subtree, and the *following* key. Easiest way
2184 : * is to copy the right sibling's downlink over the downlink that points
2185 : * to top parent page, and then delete the right sibling's original pivot
2186 : * tuple.
2187 : *
2188 : * Lanin and Shasha make the key space move left when deleting a page,
2189 : * whereas the key space moves right here. That's why we cannot simply
2190 : * delete the pivot tuple with the downlink to the top parent page. See
2191 : * nbtree/README.
2192 : */
1063 pg 2193 GIC 2669 : page = BufferGetPage(subtreeparent);
373 michael 2194 2669 : opaque = BTPageGetOpaque(page);
2195 :
1063 pg 2196 2669 : itemid = PageGetItemId(page, poffset);
3313 heikki.linnakangas 2197 2669 : itup = (IndexTuple) PageGetItem(page, itemid);
1063 pg 2198 CBC 2669 : BTreeTupleSetDownLink(itup, topparentrightsib);
3313 heikki.linnakangas 2199 ECB :
1063 pg 2200 CBC 2669 : nextoffset = OffsetNumberNext(poffset);
3313 heikki.linnakangas 2201 GIC 2669 : PageIndexTupleDelete(page, nextoffset);
3313 heikki.linnakangas 2202 ECB :
2203 : /*
1063 pg 2204 : * Mark the leaf page as half-dead, and stamp it with a link to the top
2205 : * parent page. When the leaf page is also the top parent page, the link
2206 : * is set to InvalidBlockNumber.
2207 : */
2545 kgrittn 2208 CBC 2669 : page = BufferGetPage(leafbuf);
373 michael 2209 GIC 2669 : opaque = BTPageGetOpaque(page);
3313 heikki.linnakangas 2210 CBC 2669 : opaque->btpo_flags |= BTP_HALF_DEAD;
3313 heikki.linnakangas 2211 ECB :
1335 pg 2212 GBC 2669 : Assert(PageGetMaxOffsetNumber(page) == P_HIKEY);
3273 heikki.linnakangas 2213 GIC 2669 : MemSet(&trunctuple, 0, sizeof(IndexTupleData));
2214 2669 : trunctuple.t_info = sizeof(IndexTupleData);
1063 pg 2215 CBC 2669 : if (topparent != leafblkno)
2216 46 : BTreeTupleSetTopParent(&trunctuple, topparent);
2217 : else
1812 teodor 2218 GIC 2623 : BTreeTupleSetTopParent(&trunctuple, InvalidBlockNumber);
1816 teodor 2219 ECB :
1335 pg 2220 GIC 2669 : if (!PageIndexTupleOverwrite(page, P_HIKEY, (Item) &trunctuple,
2221 2669 : IndexTupleSize(&trunctuple)))
1335 pg 2222 UIC 0 : elog(ERROR, "could not overwrite high key in half-dead page");
2223 :
3313 heikki.linnakangas 2224 ECB : /* Must mark buffers dirty before XLogInsert */
1063 pg 2225 CBC 2669 : MarkBufferDirty(subtreeparent);
3313 heikki.linnakangas 2226 2669 : MarkBufferDirty(leafbuf);
3313 heikki.linnakangas 2227 ECB :
2228 : /* XLOG stuff */
3313 heikki.linnakangas 2229 CBC 2669 : if (RelationNeedsWAL(rel))
2230 : {
3313 heikki.linnakangas 2231 ECB : xl_btree_mark_page_halfdead xlrec;
2232 : XLogRecPtr recptr;
2233 :
1063 pg 2234 GIC 2669 : xlrec.poffset = poffset;
3313 heikki.linnakangas 2235 CBC 2669 : xlrec.leafblk = leafblkno;
1063 pg 2236 2669 : if (topparent != leafblkno)
2237 46 : xlrec.topparent = topparent;
3273 heikki.linnakangas 2238 ECB : else
3273 heikki.linnakangas 2239 GIC 2623 : xlrec.topparent = InvalidBlockNumber;
3313 heikki.linnakangas 2240 ECB :
3062 heikki.linnakangas 2241 GIC 2669 : XLogBeginInsert();
3062 heikki.linnakangas 2242 CBC 2669 : XLogRegisterBuffer(0, leafbuf, REGBUF_WILL_INIT);
1063 pg 2243 GIC 2669 : XLogRegisterBuffer(1, subtreeparent, REGBUF_STANDARD);
3062 heikki.linnakangas 2244 ECB :
2545 kgrittn 2245 CBC 2669 : page = BufferGetPage(leafbuf);
373 michael 2246 2669 : opaque = BTPageGetOpaque(page);
3313 heikki.linnakangas 2247 2669 : xlrec.leftblk = opaque->btpo_prev;
3313 heikki.linnakangas 2248 GIC 2669 : xlrec.rightblk = opaque->btpo_next;
2249 :
3062 heikki.linnakangas 2250 CBC 2669 : XLogRegisterData((char *) &xlrec, SizeOfBtreeMarkPageHalfDead);
2251 :
2252 2669 : recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_MARK_PAGE_HALFDEAD);
3313 heikki.linnakangas 2253 ECB :
1063 pg 2254 GIC 2669 : page = BufferGetPage(subtreeparent);
3313 heikki.linnakangas 2255 2669 : PageSetLSN(page, recptr);
2545 kgrittn 2256 2669 : page = BufferGetPage(leafbuf);
3313 heikki.linnakangas 2257 2669 : PageSetLSN(page, recptr);
2258 : }
2259 :
2260 2669 : END_CRIT_SECTION();
2261 :
1063 pg 2262 2669 : _bt_relbuf(rel, subtreeparent);
3313 heikki.linnakangas 2263 2669 : return true;
2264 : }
2265 :
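/*
 * A self-contained, hedged analogue (hypothetical types) of the parent-page
 * surgery performed above: copy the right sibling's downlink over the pivot
 * that points to the doomed subtree root, then delete the right sibling's
 * original pivot, so the doomed child's key space moves right.
 */
typedef struct DemoPivot
{
	int			highkey;		/* separator key bounding the child */
	unsigned	downlink;		/* child block the pivot points to */
} DemoPivot;

static void
demo_remove_downlink(DemoPivot *pivots, int *npivots, int poffset)
{
	/* overwrite the doomed child's downlink with its right sibling's */
	pivots[poffset].downlink = pivots[poffset + 1].downlink;

	/* then squeeze out the right sibling's now-redundant pivot tuple */
	for (int i = poffset + 1; i < *npivots - 1; i++)
		pivots[i] = pivots[i + 1];
	(*npivots)--;

	/* pivots[poffset].highkey still bounds the merged key space */
}
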
2266 : /*
2267 : * Second stage of page deletion.
2268 : *
2269 : * Unlinks a single page (in the subtree undergoing deletion) from its
2270 : * siblings. Also marks the page deleted.
2271 : *
2272 : * To get rid of the whole subtree, including the leaf page itself, call here
2273 : * until the leaf page is deleted. The original "top parent" established in
2274 : * the first stage of deletion is deleted in the first call here, while the
2275 : * leaf page is deleted in the last call here. Note that the leaf page itself
2276 : * is often the initial top parent page.
2277 : *
2278 : * Returns 'false' if the page could not be unlinked (shouldn't happen). If
2279 : * the right sibling of the current target page is empty, *rightsib_empty is
2280 : * set to true, allowing caller to delete the target's right sibling page in
2281 : * passing. Note that *rightsib_empty is only actually used by caller when
1073 pg 2282 ECB : * target page is leafbuf, following last call here for leafbuf/the subtree
2283 : * containing leafbuf. (We always set *rightsib_empty for caller, just to be
2284 : * consistent.)
2285 : *
2437 tgl 2286 : * Must hold pin and lock on leafbuf at entry (read or write doesn't matter).
2287 : * On success exit, we'll be holding pin and write lock. On failure exit,
2288 : * we'll release both pin and lock before returning (we define it that way
2289 : * to avoid having to reacquire a lock we already released).
2290 : */
2291 : static bool
1073 pg 2292 CBC 2755 : _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, BlockNumber scanblkno,
2293 : bool *rightsib_empty, BTVacState *vstate)
2294 : {
3313 heikki.linnakangas 2295 2755 : BlockNumber leafblkno = BufferGetBlockNumber(leafbuf);
773 pg 2296 2755 : IndexBulkDeleteResult *stats = vstate->stats;
3313 heikki.linnakangas 2297 ECB : BlockNumber leafleftsib;
2298 : BlockNumber leafrightsib;
2299 : BlockNumber target;
2300 : BlockNumber leftsib;
2301 : BlockNumber rightsib;
3313 heikki.linnakangas 2302 GIC 2755 : Buffer lbuf = InvalidBuffer;
2303 : Buffer buf;
2304 : Buffer rbuf;
2305 2755 : Buffer metabuf = InvalidBuffer;
2306 2755 : Page metapg = NULL;
3313 heikki.linnakangas 2307 CBC 2755 : BTMetaPageData *metad = NULL;
3313 heikki.linnakangas 2308 ECB : ItemId itemid;
2309 : Page page;
2310 : BTPageOpaque opaque;
2311 : FullTransactionId safexid;
2312 : bool rightsib_is_rightmost;
2313 : uint32 targetlevel;
2314 : IndexTuple leafhikey;
774 pg 2315 : BlockNumber leaftopparent;
3313 heikki.linnakangas 2316 :
2545 kgrittn 2317 CBC 2755 : page = BufferGetPage(leafbuf);
373 michael 2318 2755 : opaque = BTPageGetOpaque(page);
3313 heikki.linnakangas 2319 ECB :
1063 pg 2320 GIC 2755 : Assert(P_ISLEAF(opaque) && !P_ISDELETED(opaque) && P_ISHALFDEAD(opaque));
3313 heikki.linnakangas 2321 ECB :
2322 : /*
2323 : * Remember some information about the leaf page.
2324 : */
3313 heikki.linnakangas 2325 GIC 2755 : itemid = PageGetItemId(page, P_HIKEY);
1812 teodor 2326 2755 : leafhikey = (IndexTuple) PageGetItem(page, itemid);
1079 pg 2327 CBC 2755 : target = BTreeTupleGetTopParent(leafhikey);
3313 heikki.linnakangas 2328 GIC 2755 : leafleftsib = opaque->btpo_prev;
2329 2755 : leafrightsib = opaque->btpo_next;
3313 heikki.linnakangas 2330 ECB :
992 pg 2331 GIC 2755 : _bt_unlockbuf(rel, leafbuf);
2332 :
1740 andres 2333 ECB : /*
2334 : * Check here, as calling loops will have locks held, preventing
2335 : * interrupts from being processed.
2336 : */
1740 andres 2337 CBC 2755 : CHECK_FOR_INTERRUPTS();
2338 :
2339 : /* Unlink the current top parent of the subtree */
1079 pg 2340 GIC 2755 : if (!BlockNumberIsValid(target))
2341 : {
1063 pg 2342 ECB : /* Target is leaf page (or leaf page is top parent, if you prefer) */
1079 pg 2343 GIC 2669 : target = leafblkno;
2344 :
1079 pg 2345 CBC 2669 : buf = leafbuf;
2346 2669 : leftsib = leafleftsib;
2347 2669 : targetlevel = 0;
1079 pg 2348 ECB : }
2349 : else
3313 heikki.linnakangas 2350 : {
2351 : /* Target is the internal page taken from leaf's top parent link */
2437 tgl 2352 GIC 86 : Assert(target != leafblkno);
2353 :
2354 : /* Fetch the block number of the target's left sibling */
8 andres 2355 GNC 86 : buf = _bt_getbuf(rel, vstate->info->heaprel, target, BT_READ);
2545 kgrittn 2356 CBC 86 : page = BufferGetPage(buf);
373 michael 2357 GIC 86 : opaque = BTPageGetOpaque(page);
3313 heikki.linnakangas 2358 86 : leftsib = opaque->btpo_prev;
774 pg 2359 86 : targetlevel = opaque->btpo_level;
1079 2360 86 : Assert(targetlevel > 0);
2361 :
2362 : /*
2363 : * To avoid deadlocks, we'd better drop the target page lock before
2364 : * going further.
2365 : */
992 2366 86 : _bt_unlockbuf(rel, buf);
2367 : }
6003 tgl 2368 ECB :
7350 2369 : /*
2370 : * We have to lock the pages we need to modify in the standard order:
2371 : * moving right, then up. Else we will deadlock against other writers.
7188 bruce 2372 : *
3313 heikki.linnakangas 2373 : * So, first lock the leaf page, if it's not the target. Then find and
2374 : * write-lock the current left sibling of the target page. The sibling
2375 : * that was current a moment ago could have split, so we may have to move
2376 : * right.
7350 tgl 2377 EUB : */
3313 heikki.linnakangas 2378 GIC 2755 : if (target != leafblkno)
992 pg 2379 86 : _bt_lockbuf(rel, leafbuf, BT_WRITE);
7350 tgl 2380 2755 : if (leftsib != P_NONE)
2381 : {
8 andres 2382 GNC 493 : lbuf = _bt_getbuf(rel, vstate->info->heaprel, leftsib, BT_WRITE);
2545 kgrittn 2383 GIC 493 : page = BufferGetPage(lbuf);
373 michael 2384 493 : opaque = BTPageGetOpaque(page);
7350 tgl 2385 493 : while (P_ISDELETED(opaque) || opaque->btpo_next != target)
2386 : {
697 tgl 2387 UIC 0 : bool leftsibvalid = true;
1740 andres 2388 EUB :
2389 : /*
794 pg 2390 : * Before we follow the link from the page that was the left
2391 : * sibling mere moments ago, validate its right link. This
2392 : * reduces the opportunities for the loop to fail to ever make any
2393 : * progress in the presence of index corruption.
2394 : *
2395 : * Note: we rely on the assumption that there can only be one
2396 : * vacuum process running at a time (against the same index).
1740 andres 2397 : */
794 pg 2398 UIC 0 : if (P_RIGHTMOST(opaque) || P_ISDELETED(opaque) ||
2399 0 : leftsib == opaque->btpo_next)
794 pg 2400 UBC 0 : leftsibvalid = false;
794 pg 2401 EUB :
794 pg 2402 UIC 0 : leftsib = opaque->btpo_next;
2403 0 : _bt_relbuf(rel, lbuf);
2404 :
2405 0 : if (!leftsibvalid)
7350 tgl 2406 EUB : {
2437 tgl 2407 UIC 0 : if (target != leafblkno)
2408 : {
2437 tgl 2409 EUB : /* we have only a pin on target, but pin+lock on leafbuf */
2437 tgl 2410 UIC 0 : ReleaseBuffer(buf);
2411 0 : _bt_relbuf(rel, leafbuf);
2412 : }
2413 : else
2414 : {
2415 : /* we have only a pin on leafbuf */
2437 tgl 2416 UBC 0 : ReleaseBuffer(leafbuf);
2417 : }
2418 :
794 pg 2419 0 : ereport(LOG,
2420 : (errcode(ERRCODE_INDEX_CORRUPTED),
2421 : errmsg_internal("valid left sibling for deletion target could not be located: "
794 pg 2422 EUB : "left sibling %u of target %u with leafblkno %u and scanblkno %u in index \"%s\"",
2423 : leftsib, target, leafblkno, scanblkno,
2424 : RelationGetRelationName(rel))));
2425 :
3313 heikki.linnakangas 2426 UIC 0 : return false;
2427 : }
794 pg 2428 ECB :
794 pg 2429 UIC 0 : CHECK_FOR_INTERRUPTS();
2430 :
794 pg 2431 ECB : /* step right one page */
8 andres 2432 UNC 0 : lbuf = _bt_getbuf(rel, vstate->info->heaprel, leftsib, BT_WRITE);
2545 kgrittn 2433 LBC 0 : page = BufferGetPage(lbuf);
373 michael 2434 UIC 0 : opaque = BTPageGetOpaque(page);
2435 : }
2436 : }
2437 : else
7350 tgl 2438 GIC 2262 : lbuf = InvalidBuffer;
2439 :
794 pg 2440 ECB : /* Next write-lock the target page itself */
992 pg 2441 GBC 2755 : _bt_lockbuf(rel, buf, BT_WRITE);
2545 kgrittn 2442 GIC 2755 : page = BufferGetPage(buf);
373 michael 2443 2755 : opaque = BTPageGetOpaque(page);
7188 bruce 2444 ECB :
7350 tgl 2445 EUB : /*
2446 : * Check page is still empty etc., else abandon deletion. This is just for
2447 : * paranoia's sake; a half-dead page cannot resurrect because there can be
2448 : * only one vacuum process running at a time.
2449 : */
3313 heikki.linnakangas 2450 GIC 2755 : if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque))
768 pg 2451 LBC 0 : elog(ERROR, "target page changed status unexpectedly in block %u of index \"%s\"",
2452 : target, RelationGetRelationName(rel));
1063 pg 2453 ECB :
7350 tgl 2454 CBC 2755 : if (opaque->btpo_prev != leftsib)
1347 peter 2455 UBC 0 : ereport(ERROR,
2456 : (errcode(ERRCODE_INDEX_CORRUPTED),
2457 : errmsg_internal("target page left link unexpectedly changed from %u to %u in block %u of index \"%s\"",
2458 : leftsib, opaque->btpo_prev, target,
768 pg 2459 ECB : RelationGetRelationName(rel))));
2460 :
3313 heikki.linnakangas 2461 GIC 2755 : if (target == leafblkno)
2462 : {
2463 2669 : if (P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
2464 2669 : !P_ISLEAF(opaque) || !P_ISHALFDEAD(opaque))
768 pg 2465 LBC 0 : elog(ERROR, "target leaf page changed status unexpectedly in block %u of index \"%s\"",
3313 heikki.linnakangas 2466 ECB : target, RelationGetRelationName(rel));
774 pg 2467 EUB :
2468 : /* Leaf page is also target page: don't set leaftopparent */
774 pg 2469 GIC 2669 : leaftopparent = InvalidBlockNumber;
2470 : }
3313 heikki.linnakangas 2471 ECB : else
2472 : {
774 pg 2473 : IndexTuple finaldataitem;
2474 :
3313 heikki.linnakangas 2475 CBC 86 : if (P_FIRSTDATAKEY(opaque) != PageGetMaxOffsetNumber(page) ||
2476 86 : P_ISLEAF(opaque))
768 pg 2477 UIC 0 : elog(ERROR, "target internal page on level %u changed status unexpectedly in block %u of index \"%s\"",
2478 : targetlevel, target, RelationGetRelationName(rel));
2479 :
774 pg 2480 ECB : /* Target is internal: set leaftopparent for next call here... */
3313 heikki.linnakangas 2481 GIC 86 : itemid = PageGetItemId(page, P_FIRSTDATAKEY(opaque));
774 pg 2482 86 : finaldataitem = (IndexTuple) PageGetItem(page, itemid);
2483 86 : leaftopparent = BTreeTupleGetDownLink(finaldataitem);
2484 : /* ...except when it would be a redundant pointer-to-self */
774 pg 2485 CBC 86 : if (leaftopparent == leafblkno)
2486 46 : leaftopparent = InvalidBlockNumber;
3313 heikki.linnakangas 2487 ECB : }
2488 :
768 pg 2489 : /* No leaftopparent for level 0 (leaf page) or level 1 target */
768 pg 2490 GBC 2755 : Assert(!BlockNumberIsValid(leaftopparent) || targetlevel > 1);
2491 :
2492 : /*
2493 : * And next write-lock the (current) right sibling.
2494 : */
7350 tgl 2495 GIC 2755 : rightsib = opaque->btpo_next;
8 andres 2496 GNC 2755 : rbuf = _bt_getbuf(rel, vstate->info->heaprel, rightsib, BT_WRITE);
2545 kgrittn 2497 CBC 2755 : page = BufferGetPage(rbuf);
373 michael 2498 GIC 2755 : opaque = BTPageGetOpaque(page);
4606 tgl 2499 2755 : if (opaque->btpo_prev != target)
1347 peter 2500 UIC 0 : ereport(ERROR,
2501 : (errcode(ERRCODE_INDEX_CORRUPTED),
2502 : errmsg_internal("right sibling's left-link doesn't match: "
2503 : "block %u links to %u instead of expected %u in index \"%s\"",
2504 : rightsib, opaque->btpo_prev, target,
2505 : RelationGetRelationName(rel))));
3313 heikki.linnakangas 2506 GIC 2755 : rightsib_is_rightmost = P_RIGHTMOST(opaque);
2507 2755 : *rightsib_empty = (P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
7188 bruce 2508 ECB :
2509 : /*
6385 2510 : * If we are deleting the next-to-last page on the target's level, then
2511 : * the rightsib is a candidate to become the new fast root. (In theory, it
2512 : * might be possible to push the fast root even further down, but the odds
2513 : * of doing so are slim, and the locking considerations daunting.)
2514 : *
7350 tgl 2515 : * We can safely acquire a lock on the metapage here --- see comments for
2516 : * _bt_newroot().
2517 : */
3313 heikki.linnakangas 2518 CBC 2755 : if (leftsib == P_NONE && rightsib_is_rightmost)
2519 : {
2545 kgrittn 2520 GIC 23 : page = BufferGetPage(rbuf);
373 michael 2521 23 : opaque = BTPageGetOpaque(page);
7350 tgl 2522 23 : if (P_RIGHTMOST(opaque))
2523 : {
2524 : /* rightsib will be the only one left on the level */
8 andres 2525 GNC 23 : metabuf = _bt_getbuf(rel, vstate->info->heaprel, BTREE_METAPAGE,
2526 : BT_WRITE);
2545 kgrittn 2527 GIC 23 : metapg = BufferGetPage(metabuf);
7350 tgl 2528 23 : metad = BTPageGetMeta(metapg);
7188 bruce 2529 EUB :
7350 tgl 2530 : /*
2531 : * The expected case here is btm_fastlevel == targetlevel+1; if
2532 : * the fastlevel is <= targetlevel, something is wrong, and we
2533 : * choose to overwrite it to fix it.
2534 : */
7188 bruce 2535 GIC 23 : if (metad->btm_fastlevel > targetlevel + 1)
2536 : {
2537 : /* no update wanted */
7350 tgl 2538 UIC 0 : _bt_relbuf(rel, metabuf);
2539 0 : metabuf = InvalidBuffer;
7350 tgl 2540 ECB : }
2541 : }
2542 : }
2543 :
2544 : /*
2545 : * Here we begin doing the deletion.
2546 : */
2547 :
2548 : /* No ereport(ERROR) until changes are logged */
7350 tgl 2549 CBC 2755 : START_CRIT_SECTION();
7350 tgl 2550 ECB :
2551 : /*
6385 bruce 2552 : * Update siblings' side-links. Note the target page's side-links will
2553 : * continue to point to the siblings. Asserts here are just rechecking
4606 tgl 2554 : * things we already verified above.
7350 2555 : */
7350 tgl 2556 CBC 2755 : if (BufferIsValid(lbuf))
7350 tgl 2557 ECB : {
2545 kgrittn 2558 GIC 493 : page = BufferGetPage(lbuf);
373 michael 2559 493 : opaque = BTPageGetOpaque(page);
7350 tgl 2560 493 : Assert(opaque->btpo_next == target);
2561 493 : opaque->btpo_next = rightsib;
2562 : }
2545 kgrittn 2563 2755 : page = BufferGetPage(rbuf);
373 michael 2564 2755 : opaque = BTPageGetOpaque(page);
7350 tgl 2565 2755 : Assert(opaque->btpo_prev == target);
2566 2755 : opaque->btpo_prev = leftsib;
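
	/*
	 * The level's sibling chain is now rewired around the target:
	 *
	 *   before:  [leftsib] <-> [target] <-> [rightsib]
	 *   after:   [leftsib] <-------------> [rightsib]
	 *
	 * while the target's own btpo_prev/btpo_next still point at its old
	 * neighbors, as noted above.
	 */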
2567 :
2568 : /*
2569 : * If we deleted a parent of the targeted leaf page, instead of the leaf
3313 heikki.linnakangas 2570 ECB : * itself, update the leaf to point to the next remaining child in the
1063 pg 2571 : * subtree.
2572 : *
2573 : * Note: We rely on the fact that a buffer pin on the leaf page has been
2574 : * held since leafhikey was initialized. This is safe, though only
2575 : * because the page was already half-dead at that point. The leaf page
2576 : * cannot have been modified by any other backend during the period when
2577 : * no lock was held.
2578 : */
3313 heikki.linnakangas 2579 GIC 2755 : if (target != leafblkno)
774 pg 2580 86 : BTreeTupleSetTopParent(leafhikey, leaftopparent);
2581 :
2582 : /*
7350 tgl 2583 ECB : * Mark the page itself deleted. It can be recycled when all current
3964 simon 2584 : * transactions are gone. Storing GetTopTransactionId() would work, but
2585 : * we're in VACUUM and would not otherwise have an XID. Having already
2586 : * updated links to the target, ReadNextFullTransactionId() suffices as an
2587 : * upper bound. Any scan having retained a now-stale link is advertising
2588 : * in its PGPROC an xmin less than or equal to the value we read here. It
2589 : * will continue to do so, holding back the xmin horizon, for the duration
2590 : * of that scan.
7350 tgl 2591 : */
2545 kgrittn 2592 CBC 2755 : page = BufferGetPage(buf);
373 michael 2593 2755 : opaque = BTPageGetOpaque(page);
1073 pg 2594 GIC 2755 : Assert(P_ISHALFDEAD(opaque) || !P_ISLEAF(opaque));
2595 :
977 akorotkov 2596 ECB : /*
 2597 : * Store an upper bound XID that's used to determine when the deleted
 2598 : * page is no longer needed as a tombstone
2599 : */
774 pg 2600 GBC 2755 : safexid = ReadNextFullTransactionId();
774 pg 2601 CBC 2755 : BTPageSetDeleted(page, safexid);
2602 2755 : opaque->btpo_cycleid = 0;
977 akorotkov 2603 ECB :
2604 : /* And update the metapage, if needed */
7350 tgl 2605 GIC 2755 : if (BufferIsValid(metabuf))
2606 : {
1831 teodor 2607 ECB : /* upgrade metapage if needed */
1481 pg 2608 CBC 23 : if (metad->btm_version < BTREE_NOVAC_VERSION)
1831 teodor 2609 LBC 0 : _bt_upgrademetapage(metapg);
7350 tgl 2610 CBC 23 : metad->btm_fastroot = rightsib;
2611 23 : metad->btm_fastlevel = targetlevel;
6218 2612 23 : MarkBufferDirty(metabuf);
2613 : }
2614 :
6218 tgl 2615 ECB : /* Must mark buffers dirty before XLogInsert */
6218 tgl 2616 GIC 2755 : MarkBufferDirty(rbuf);
2617 2755 : MarkBufferDirty(buf);
2618 2755 : if (BufferIsValid(lbuf))
2619 493 : MarkBufferDirty(lbuf);
3313 heikki.linnakangas 2620 2755 : if (target != leafblkno)
2621 86 : MarkBufferDirty(leafbuf);
6218 tgl 2622 ECB :
2623 : /* XLOG stuff */
4500 rhaas 2624 CBC 2755 : if (RelationNeedsWAL(rel))
7350 tgl 2625 ECB : {
3313 heikki.linnakangas 2626 : xl_btree_unlink_page xlrec;
7350 tgl 2627 : xl_btree_metadata xlmeta;
2628 : uint8 xlinfo;
2629 : XLogRecPtr recptr;
2630 :
3062 heikki.linnakangas 2631 GIC 2755 : XLogBeginInsert();
3062 heikki.linnakangas 2632 ECB :
3062 heikki.linnakangas 2633 CBC 2755 : XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
2634 2755 : if (BufferIsValid(lbuf))
2635 493 : XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD);
3062 heikki.linnakangas 2636 GIC 2755 : XLogRegisterBuffer(2, rbuf, REGBUF_STANDARD);
2637 2755 : if (target != leafblkno)
3062 heikki.linnakangas 2638 CBC 86 : XLogRegisterBuffer(3, leafbuf, REGBUF_WILL_INIT);
3313 heikki.linnakangas 2639 ECB :
774 pg 2640 : /* information stored on the target/to-be-unlinked block */
3313 heikki.linnakangas 2641 GIC 2755 : xlrec.leftsib = leftsib;
3313 heikki.linnakangas 2642 CBC 2755 : xlrec.rightsib = rightsib;
774 pg 2643 GIC 2755 : xlrec.level = targetlevel;
774 pg 2644 CBC 2755 : xlrec.safexid = safexid;
2645 :
3313 heikki.linnakangas 2646 ECB : /* information needed to recreate the leaf block (if not the target) */
3313 heikki.linnakangas 2647 GIC 2755 : xlrec.leafleftsib = leafleftsib;
3313 heikki.linnakangas 2648 CBC 2755 : xlrec.leafrightsib = leafrightsib;
774 pg 2649 2755 : xlrec.leaftopparent = leaftopparent;
3313 heikki.linnakangas 2650 ECB :
3062 heikki.linnakangas 2651 CBC 2755 : XLogRegisterData((char *) &xlrec, SizeOfBtreeUnlinkPage);
7350 tgl 2652 ECB :
7350 tgl 2653 CBC 2755 : if (BufferIsValid(metabuf))
7350 tgl 2654 ECB : {
1983 tgl 2655 CBC 23 : XLogRegisterBuffer(4, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);
2656 :
1481 pg 2657 23 : Assert(metad->btm_version >= BTREE_NOVAC_VERSION);
2658 23 : xlmeta.version = metad->btm_version;
7350 tgl 2659 GIC 23 : xlmeta.root = metad->btm_root;
2660 23 : xlmeta.level = metad->btm_level;
7350 tgl 2661 CBC 23 : xlmeta.fastroot = metad->btm_fastroot;
7350 tgl 2662 GIC 23 : xlmeta.fastlevel = metad->btm_fastlevel;
774 pg 2663 CBC 23 : xlmeta.last_cleanup_num_delpages = metad->btm_last_cleanup_num_delpages;
1138 pg 2664 GIC 23 : xlmeta.allequalimage = metad->btm_allequalimage;
7350 tgl 2665 ECB :
3062 heikki.linnakangas 2666 GIC 23 : XLogRegisterBufData(4, (char *) &xlmeta, sizeof(xl_btree_metadata));
3313 heikki.linnakangas 2667 CBC 23 : xlinfo = XLOG_BTREE_UNLINK_PAGE_META;
2668 : }
7350 tgl 2669 ECB : else
3313 heikki.linnakangas 2670 CBC 2732 : xlinfo = XLOG_BTREE_UNLINK_PAGE;
7350 tgl 2671 ECB :
3062 heikki.linnakangas 2672 CBC 2755 : recptr = XLogInsert(RM_BTREE_ID, xlinfo);
7350 tgl 2673 ECB :
7350 tgl 2674 GIC 2755 : if (BufferIsValid(metabuf))
7350 tgl 2675 ECB : {
7350 tgl 2676 CBC 23 : PageSetLSN(metapg, recptr);
2677 : }
2545 kgrittn 2678 2755 : page = BufferGetPage(rbuf);
7350 tgl 2679 GIC 2755 : PageSetLSN(page, recptr);
2545 kgrittn 2680 CBC 2755 : page = BufferGetPage(buf);
7350 tgl 2681 2755 : PageSetLSN(page, recptr);
7350 tgl 2682 GIC 2755 : if (BufferIsValid(lbuf))
2683 : {
2545 kgrittn 2684 493 : page = BufferGetPage(lbuf);
7350 tgl 2685 CBC 493 : PageSetLSN(page, recptr);
2686 : }
3313 heikki.linnakangas 2687 GIC 2755 : if (target != leafblkno)
3313 heikki.linnakangas 2688 ECB : {
2545 kgrittn 2689 CBC 86 : page = BufferGetPage(leafbuf);
3313 heikki.linnakangas 2690 GIC 86 : PageSetLSN(page, recptr);
2691 : }
7350 tgl 2692 ECB : }
2693 :
7350 tgl 2694 CBC 2755 : END_CRIT_SECTION();
2695 :
2696 : /* release metapage */
2697 2755 : if (BufferIsValid(metabuf))
6218 2698 23 : _bt_relbuf(rel, metabuf);
2699 :
2700 : /* release siblings */
7350 tgl 2701 GIC 2755 : if (BufferIsValid(lbuf))
6218 2702 493 : _bt_relbuf(rel, lbuf);
3313 heikki.linnakangas 2703 2755 : _bt_relbuf(rel, rbuf);
2704 :
2705 : /* If the target is not leafbuf, we're done with it now -- release it */
774 pg 2706 2755 : if (target != leafblkno)
2707 86 : _bt_relbuf(rel, buf);
2708 :
1073 pg 2709 ECB : /*
773 2710 : * Maintain pages_newly_deleted, which is simply the number of pages
2711 : * deleted by the ongoing VACUUM operation.
2712 : *
2713 : * Maintain pages_deleted in a way that takes into account how
2714 : * btvacuumpage() will count deleted pages that have yet to become
 2715 : * scanblkno -- only count the page when it's not going to get that
 2716 : * treatment later on.
2717 : */
773 pg 2718 GIC 2755 : stats->pages_newly_deleted++;
1073 pg 2719 CBC 2755 : if (target <= scanblkno)
773 pg 2720 GIC 2676 : stats->pages_deleted++;
1073 pg 2721 ECB :
2722 : /*
2723 : * Remember information about the target page (now a newly deleted page)
2724 : * in dedicated vstate space for later. The page will be considered as a
2725 : * candidate to place in the FSM at the end of the current btvacuumscan()
2726 : * call.
2727 : */
749 pg 2728 GIC 2755 : _bt_pendingfsm_add(vstate, target, safexid);
2729 :
3313 heikki.linnakangas 2730 2755 : return true;
2731 : }
2732 :
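/*
 * The unlink step above follows the usual multi-buffer WAL protocol: all
 * page changes happen inside a single critical section, every modified
 * buffer is marked dirty before XLogInsert(), and each registered page is
 * stamped with the new record's LSN afterwards.  What follows is a minimal
 * sketch of that protocol reduced to two buffers -- illustration only, not
 * nbtree code.  sketch_log_two_page_update() is a hypothetical helper; the
 * caller is assumed to have already applied its page changes and to supply
 * its own rmgr ID, info byte, and record payload.
 */
static XLogRecPtr
sketch_log_two_page_update(Relation rel, Buffer buf1, Buffer buf2,
						   RmgrId rmid, uint8 info,
						   char *payload, int payloadlen)
{
	XLogRecPtr	recptr = InvalidXLogRecPtr;

	/* No ereport(ERROR) until the record is inserted and LSNs are set */
	START_CRIT_SECTION();

	/* Must mark buffers dirty before XLogInsert, never after */
	MarkBufferDirty(buf1);
	MarkBufferDirty(buf2);

	if (RelationNeedsWAL(rel))
	{
		XLogBeginInsert();
		XLogRegisterBuffer(0, buf1, REGBUF_STANDARD);
		XLogRegisterBuffer(1, buf2, REGBUF_STANDARD);
		XLogRegisterData(payload, payloadlen);

		recptr = XLogInsert(rmid, info);

		/* Stamp every registered page with the record's LSN */
		PageSetLSN(BufferGetPage(buf1), recptr);
		PageSetLSN(BufferGetPage(buf2), recptr);
	}

	END_CRIT_SECTION();

	return recptr;
}
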
2733 : /*
2734 : * Establish how tall the to-be-deleted subtree will be during the first stage
2735 : * of page deletion.
2736 : *
2737 : * Caller's child argument is the block number of the page caller wants to
2738 : * delete (this is leafbuf's block number, except when we're called
2739 : * recursively). stack is a search stack leading to it. Note that we will
2740 : * update the stack entry(s) to reflect current downlink positions --- this is
2741 : * similar to the corresponding point in page split handling.
2742 : *
2743 : * If "first stage" caller cannot go ahead with deleting _any_ pages, returns
2744 : * false. Returns true on success, in which case caller can use certain
2745 : * details established here to perform the first stage of deletion. This
2746 : * function is the last point at which page deletion may be deemed unsafe
2747 : * (barring index corruption, or unexpected concurrent page deletions).
2748 : *
2749 : * We write lock the parent of the root of the to-be-deleted subtree for
2750 : * caller on success (i.e. we leave our lock on the *subtreeparent buffer for
2751 : * caller). Caller will have to remove a downlink from *subtreeparent. We
2752 : * also set a *subtreeparent offset number in *poffset, to indicate the
2753 : * location of the pivot tuple that contains the relevant downlink.
2754 : *
2755 : * The root of the to-be-deleted subtree is called the "top parent". Note
2756 : * that the leafbuf page is often the final "top parent" page (you can think
1063 pg 2757 ECB : * of the leafbuf page as a degenerate single page subtree when that happens).
2758 : * Caller should initialize *topparent to the target leafbuf page block number
2759 : * (while *topparentrightsib should be set to leafbuf's right sibling block
2760 : * number). We will update *topparent (and *topparentrightsib) for caller
2761 : * here, though only when it turns out that caller will delete at least one
2762 : * internal page (i.e. only when caller needs to store a valid link to the top
2763 : * parent block in the leafbuf page using BTreeTupleSetTopParent()).
2764 : */
2765 : static bool
8 andres 2766 GNC 2758 : _bt_lock_subtree_parent(Relation rel, Relation heaprel, BlockNumber child,
2767 : BTStack stack, Buffer *subtreeparent,
2768 : OffsetNumber *poffset, BlockNumber *topparent,
2769 : BlockNumber *topparentrightsib)
2770 : {
2771 : BlockNumber parent,
2772 : leftsibparent;
2773 : OffsetNumber parentoffset,
2774 : maxoff;
1063 pg 2775 ECB : Buffer pbuf;
2776 : Page page;
2777 : BTPageOpaque opaque;
2778 :
2779 : /*
2780 : * Locate the pivot tuple whose downlink points to "child". Write lock
2781 : * the parent page itself.
2782 : */
8 andres 2783 GNC 2758 : pbuf = _bt_getstackbuf(rel, heaprel, stack, child);
1063 pg 2784 GIC 2758 : if (pbuf == InvalidBuffer)
2785 : {
2786 : /*
2787 : * Failed to "re-find" a pivot tuple whose downlink matched our child
2788 : * block number on the parent level -- the index must be corrupt.
2789 : * Don't even try to delete the leafbuf subtree. Just report the
747 pg 2790 EUB : * issue and press on with vacuuming the index.
2791 : *
2792 : * Note: _bt_getstackbuf() recovers from concurrent page splits that
2793 : * take place on the parent level. Its approach is a near-exhaustive
2794 : * linear search. This also gives it a surprisingly good chance of
2795 : * recovering in the event of a buggy or inconsistent opclass. But we
2796 : * don't rely on that here.
747 pg 2797 ECB : */
747 pg 2798 LBC 0 : ereport(LOG,
2799 : (errcode(ERRCODE_INDEX_CORRUPTED),
1063 pg 2800 ECB : errmsg_internal("failed to re-find parent key in index \"%s\" for deletion target page %u",
2801 : RelationGetRelationName(rel), child)));
747 pg 2802 LBC 0 : return false;
747 pg 2803 ECB : }
2804 :
1063 pg 2805 GIC 2758 : parent = stack->bts_blkno;
2806 2758 : parentoffset = stack->bts_offset;
2807 :
2808 2758 : page = BufferGetPage(pbuf);
373 michael 2809 2758 : opaque = BTPageGetOpaque(page);
1063 pg 2810 2758 : maxoff = PageGetMaxOffsetNumber(page);
2811 2758 : leftsibparent = opaque->btpo_prev;
2812 :
2813 : /*
2814 : * _bt_getstackbuf() completes page splits on returned parent buffer when
2815 : * required.
2816 : *
1063 pg 2817 ECB : * In general it's a bad idea for VACUUM to use up more disk space, which
2818 : * is why page deletion does not finish incomplete page splits most of the
2819 : * time. We allow this limited exception because the risk is much lower,
2820 : * and the potential downside of not proceeding is much higher: A single
2821 : * internal page with the INCOMPLETE_SPLIT flag set might otherwise
2822 : * prevent us from deleting hundreds of empty leaf pages from one level
2823 : * down.
2824 : */
1063 pg 2825 CBC 2758 : Assert(!P_INCOMPLETE_SPLIT(opaque));
1063 pg 2826 ECB :
1063 pg 2827 CBC 2758 : if (parentoffset < maxoff)
2828 : {
2829 : /*
2830 : * Child is not the rightmost child in parent, so it's safe to delete
 2831 : * the subtree whose root/topparent is the child page
2832 : */
1063 pg 2833 GIC 2669 : *subtreeparent = pbuf;
2834 2669 : *poffset = parentoffset;
2835 2669 : return true;
2836 : }
1063 pg 2837 ECB :
2838 : /*
2839 : * Child is the rightmost child of parent.
2840 : *
2841 : * Since it's the rightmost child of parent, deleting the child (or
2842 : * deleting the subtree whose root/topparent is the child page) is only
2843 : * safe when it's also possible to delete the parent.
2844 : */
1063 pg 2845 CBC 89 : Assert(parentoffset == maxoff);
1063 pg 2846 GIC 89 : if (parentoffset != P_FIRSTDATAKEY(opaque) || P_RIGHTMOST(opaque))
2847 : {
2848 : /*
2849 : * Child isn't parent's only child, or parent is rightmost on its
2850 : * entire level. Definitely cannot delete any pages.
2851 : */
2852 1 : _bt_relbuf(rel, pbuf);
2853 1 : return false;
2854 : }
1063 pg 2855 ECB :
2856 : /*
2857 : * Now make sure that the parent deletion is itself safe by examining the
2858 : * child's grandparent page. Recurse, passing the parent page as the
2859 : * child page (child's grandparent is the parent on the next level up). If
2860 : * parent deletion is unsafe, then child deletion must also be unsafe (in
2861 : * which case caller cannot delete any pages at all).
2862 : */
1063 pg 2863 GIC 88 : *topparent = parent;
2864 88 : *topparentrightsib = opaque->btpo_next;
2865 :
1063 pg 2866 ECB : /*
2867 : * Release lock on parent before recursing.
2868 : *
2869 : * It's OK to release page locks on parent before recursive call locks
2870 : * grandparent. An internal page can only acquire an entry if the child
2871 : * is split, but that cannot happen as long as we still hold a lock on the
2872 : * leafbuf page.
2873 : */
1063 pg 2874 GIC 88 : _bt_relbuf(rel, pbuf);
1063 pg 2875 ECB :
1063 pg 2876 EUB : /*
 2877 : * Before recursing, first check that the left sibling of parent (if any)
 2878 : * is not marked with the INCOMPLETE_SPLIT flag (we can only perform this
 1063 pg 2879 ECB : * check after we drop the parent lock).
2880 : *
2881 : * Note: We deliberately avoid completing incomplete splits here.
2882 : */
8 andres 2883 GNC 88 : if (_bt_leftsib_splitflag(rel, heaprel, leftsibparent, parent))
1063 pg 2884 UIC 0 : return false;
2885 :
2886 : /* Recurse to examine child page's grandparent page */
8 andres 2887 GNC 88 : return _bt_lock_subtree_parent(rel, heaprel, parent, stack->bts_parent,
2888 : subtreeparent, poffset,
2889 : topparent, topparentrightsib);
2890 : }
2891 :
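/*
 * A minimal sketch of the recursion rule that _bt_lock_subtree_parent()
 * applies, restated over a hypothetical in-memory model of the parent
 * pages (an array of downlinks per page) instead of real buffers, locks,
 * and search stacks.  The sketch_* names are illustrative only.
 */
typedef struct sketch_page
{
	BlockNumber blkno;
	BlockNumber *downlinks;		/* child block numbers, in key order */
	int			ndownlinks;
	bool		rightmost;		/* rightmost page on its own level? */
	struct sketch_page *parent; /* NULL once we run out of ancestors */
} sketch_page;

static bool
sketch_subtree_deletable(sketch_page *parent, BlockNumber child)
{
	if (parent == NULL)
		return false;			/* ran out of ancestors; give up */

	/* Child is not parent's rightmost downlink: subtree deletion is safe */
	if (parent->downlinks[parent->ndownlinks - 1] != child)
		return true;

	/*
	 * Child is the rightmost downlink, so it can only go if the parent can
	 * be deleted along with it: child must be the parent's only downlink,
	 * and the parent must not be the rightmost page of its own level.
	 */
	if (parent->ndownlinks > 1 || parent->rightmost)
		return false;

	/* Ask the same question one level up, with parent playing "child" */
	return sketch_subtree_deletable(parent->parent, parent->blkno);
}
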
2892 : /*
2893 : * Initialize local memory state used by VACUUM for _bt_pendingfsm_finalize
2894 : * optimization.
2895 : *
2896 : * Called at the start of a btvacuumscan(). Caller's cleanuponly argument
 2897 : * indicates whether the ongoing VACUUM has not called (and will not call) btbulkdelete().
749 pg 2898 ECB : *
2899 : * We expect to allocate memory inside VACUUM's top-level memory context here.
2900 : * The working buffer is subject to a limit based on work_mem. Our strategy
2901 : * when the array can no longer grow within the bounds of that limit is to
2902 : * stop saving additional newly deleted pages, while proceeding as usual with
2903 : * the pages that we can fit.
2904 : */
2905 : void
749 pg 2906 GIC 4016 : _bt_pendingfsm_init(Relation rel, BTVacState *vstate, bool cleanuponly)
2907 : {
749 pg 2908 ECB : int64 maxbufsize;
2909 :
2910 : /*
2911 : * Don't bother with optimization in cleanup-only case -- we don't expect
2912 : * any newly deleted pages. Besides, cleanup-only calls to btvacuumscan()
2913 : * can only take place because this optimization didn't work out during
2914 : * the last VACUUM.
2915 : */
749 pg 2916 CBC 4016 : if (cleanuponly)
2917 5 : return;
749 pg 2918 ECB :
2919 : /*
2920 : * Cap maximum size of array so that we always respect work_mem. Avoid
2921 : * int overflow here.
2922 : */
749 pg 2923 GIC 4011 : vstate->bufsize = 256;
749 pg 2924 CBC 4011 : maxbufsize = (work_mem * 1024L) / sizeof(BTPendingFSM);
2925 4011 : maxbufsize = Min(maxbufsize, INT_MAX);
749 pg 2926 GIC 4011 : maxbufsize = Min(maxbufsize, MaxAllocSize / sizeof(BTPendingFSM));
2927 : /* Stay sane with small work_mem */
2928 4011 : maxbufsize = Max(maxbufsize, vstate->bufsize);
2929 4011 : vstate->maxbufsize = maxbufsize;
2930 :
2931 : /* Allocate buffer, indicate that there are currently 0 pending pages */
2932 4011 : vstate->pendingpages = palloc(sizeof(BTPendingFSM) * vstate->bufsize);
2933 4011 : vstate->npendingpages = 0;
2934 : }
2935 :
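/*
 * A worked sketch of the clamp arithmetic above, with the inputs made
 * explicit as parameters.  sketch_pendingfsm_cap() is hypothetical; it just
 * repeats the Min()/Max() dance on caller-supplied values.  For example,
 * assuming a 16-byte entry, work_mem = 4096 (kB) yields
 * 4096 * 1024 / 16 = 262144 entries.
 */
static int
sketch_pendingfsm_cap(int workmem_kb, Size entrysize, int initbufsize)
{
	int64		maxbufsize;

	maxbufsize = ((int64) workmem_kb * 1024) / entrysize;
	maxbufsize = Min(maxbufsize, INT_MAX);	/* avoid int overflow */
	maxbufsize = Min(maxbufsize, MaxAllocSize / entrysize); /* palloc limit */
	maxbufsize = Max(maxbufsize, initbufsize);	/* small work_mem floor */

	return (int) maxbufsize;
}
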
2936 : /*
2937 : * Place any newly deleted pages (i.e. pages that _bt_pagedel() deleted during
2938 : * the ongoing VACUUM operation) into the free space map -- though only when
749 pg 2939 ECB : * it is actually safe to do so by now.
2940 : *
2941 : * Called at the end of a btvacuumscan(), just before free space map vacuuming
2942 : * takes place.
2943 : *
2944 : * Frees memory allocated by _bt_pendingfsm_init(), if any.
2945 : */
2946 : void
749 pg 2947 GIC 4016 : _bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
2948 : {
749 pg 2949 CBC 4016 : IndexBulkDeleteResult *stats = vstate->stats;
6 pg 2950 GNC 4016 : Relation heaprel = vstate->info->heaprel;
749 pg 2951 ECB :
749 pg 2952 GIC 4016 : Assert(stats->pages_newly_deleted >= vstate->npendingpages);
749 pg 2953 ECB :
749 pg 2954 GIC 4016 : if (vstate->npendingpages == 0)
2955 : {
2956 : /* Just free memory when nothing to do */
2957 3958 : if (vstate->pendingpages)
2958 3953 : pfree(vstate->pendingpages);
2959 :
2960 3958 : return;
2961 : }
2962 :
2963 : #ifdef DEBUG_BTREE_PENDING_FSM
2964 :
2965 : /*
2966 : * Debugging aid: Sleep for 5 seconds to greatly increase the chances of
2967 : * placing pending pages in the FSM. Note that the optimization will
2968 : * never be effective without some other backend concurrently consuming an
2969 : * XID.
2970 : */
2971 : pg_usleep(5000000L);
2972 : #endif
2973 :
2974 : /*
2975 : * Recompute VACUUM XID boundaries.
749 pg 2976 ECB : *
2977 : * We don't actually care about the oldest non-removable XID. Computing
2978 : * the oldest such XID has a useful side-effect that we rely on: it
2979 : * forcibly updates the XID horizon state for this backend. This step is
2980 : * essential; GlobalVisCheckRemovableFullXid() will not reliably recognize
2981 : * that it is now safe to recycle newly deleted pages without this step.
2982 : */
6 pg 2983 GNC 58 : GetOldestNonRemovableTransactionId(heaprel);
2984 :
749 pg 2985 GIC 268 : for (int i = 0; i < vstate->npendingpages; i++)
2986 : {
2987 268 : BlockNumber target = vstate->pendingpages[i].target;
2988 268 : FullTransactionId safexid = vstate->pendingpages[i].safexid;
2989 :
2990 : /*
749 pg 2991 ECB : * Do the equivalent of checking BTPageIsRecyclable(), but without
 2992 : * accessing the page a second time.
2993 : *
2994 : * Give up on finding the first non-recyclable page -- all later pages
2995 : * must be non-recyclable too, since _bt_pendingfsm_add() adds pages
2996 : * to the array in safexid order.
2997 : */
6 pg 2998 GNC 268 : if (!GlobalVisCheckRemovableFullXid(heaprel, safexid))
749 pg 2999 GIC 58 : break;
3000 :
3001 210 : RecordFreeIndexPage(rel, target);
3002 210 : stats->pages_free++;
3003 : }
3004 :
3005 58 : pfree(vstate->pendingpages);
749 pg 3006 ECB : }
3007 :
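/*
 * A minimal sketch of the early-exit scan above: because entries are
 * appended in safexid order, the first entry that is still unsafe to
 * recycle bounds every later entry as well, so the loop can stop there.
 * sketch_is_safe() stands in for GlobalVisCheckRemovableFullXid(); both
 * sketch_* names are hypothetical.
 */
static int
sketch_count_recyclable(BTPendingFSM *pending, int npending,
						bool (*sketch_is_safe) (FullTransactionId safexid))
{
	int			nrecyclable = 0;

	for (int i = 0; i < npending; i++)
	{
		if (!sketch_is_safe(pending[i].safexid))
			break;				/* all later entries are unsafe too */
		nrecyclable++;
	}

	return nrecyclable;
}
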
3008 : /*
3009 : * Maintain array of pages that were deleted during current btvacuumscan()
3010 : * call, for use in _bt_pendingfsm_finalize()
3011 : */
3012 : static void
749 pg 3013 GIC 2755 : _bt_pendingfsm_add(BTVacState *vstate,
3014 : BlockNumber target,
3015 : FullTransactionId safexid)
3016 : {
3017 2755 : Assert(vstate->npendingpages <= vstate->bufsize);
3018 2755 : Assert(vstate->bufsize <= vstate->maxbufsize);
3019 :
749 pg 3020 ECB : #ifdef USE_ASSERT_CHECKING
3021 :
3022 : /*
3023 : * Verify an assumption made by _bt_pendingfsm_finalize(): pages from the
3024 : * array will always be in safexid order (since that is the order that we
3025 : * save them in here)
3026 : */
749 pg 3027 GIC 2755 : if (vstate->npendingpages > 0)
3028 : {
3029 2697 : FullTransactionId lastsafexid =
3030 2697 : vstate->pendingpages[vstate->npendingpages - 1].safexid;
3031 :
3032 2697 : Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid));
3033 : }
3034 : #endif
3035 :
749 pg 3036 ECB : /*
 749 pg 3037 EUB : * If the temp buffer has reached its maxbufsize/work_mem capacity, then we
 3038 : * discard information about this page.
3039 : *
749 pg 3040 ECB : * Note that this also covers the case where we opted to not use the
3041 : * optimization in _bt_pendingfsm_init().
3042 : */
749 pg 3043 GIC 2755 : if (vstate->npendingpages == vstate->maxbufsize)
749 pg 3044 UIC 0 : return;
749 pg 3045 ECB :
749 pg 3046 EUB : /* Consider enlarging buffer */
749 pg 3047 GIC 2755 : if (vstate->npendingpages == vstate->bufsize)
749 pg 3048 ECB : {
749 pg 3049 CBC 4 : int newbufsize = vstate->bufsize * 2;
749 pg 3050 ECB :
3051 : /* Respect work_mem */
749 pg 3052 GIC 4 : if (newbufsize > vstate->maxbufsize)
749 pg 3053 UIC 0 : newbufsize = vstate->maxbufsize;
3054 :
749 pg 3055 CBC 4 : vstate->bufsize = newbufsize;
3056 4 : vstate->pendingpages =
3057 4 : repalloc(vstate->pendingpages,
749 pg 3058 GIC 4 : sizeof(BTPendingFSM) * vstate->bufsize);
3059 : }
3060 :
3061 : /* Save metadata for newly deleted page */
3062 2755 : vstate->pendingpages[vstate->npendingpages].target = target;
3063 2755 : vstate->pendingpages[vstate->npendingpages].safexid = safexid;
3064 2755 : vstate->npendingpages++;
3065 : }
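
/*
 * A minimal sketch of the append discipline used by _bt_pendingfsm_add():
 * grow the array by doubling, never past the work_mem-derived cap, and
 * silently drop entries once the cap is reached.  The sketch_* names are
 * hypothetical; the shape mirrors the function above.  Doubling keeps the
 * amortized cost of an append constant while bounding wasted space.
 */
typedef struct sketch_vec
{
	BTPendingFSM *items;
	int			nitems;
	int			cap;			/* currently allocated entries */
	int			maxcap;			/* hard limit derived from work_mem */
} sketch_vec;

static void
sketch_vec_append(sketch_vec *v, BTPendingFSM item)
{
	/* At the hard cap: drop the entry rather than exceed work_mem */
	if (v->nitems == v->maxcap)
		return;

	/* Full but still below the cap: double, clamped to the cap */
	if (v->nitems == v->cap)
	{
		int			newcap = Min(v->cap * 2, v->maxcap);

		v->items = repalloc(v->items, sizeof(BTPendingFSM) * newcap);
		v->cap = newcap;
	}

	v->items[v->nitems++] = item;
}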