/*-------------------------------------------------------------------------
 *
 * tableam.h
 *	  POSTGRES table access method definitions.
 *
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/tableam.h
 *
 * NOTES
 *	  See tableam.sgml for higher level documentation.
 *
 *-------------------------------------------------------------------------
 */
#ifndef TABLEAM_H
#define TABLEAM_H

#include "access/relscan.h"
#include "access/sdir.h"
#include "access/xact.h"
#include "executor/tuptable.h"
#include "utils/rel.h"
#include "utils/snapshot.h"


#define DEFAULT_TABLE_ACCESS_METHOD "heap"

/* GUCs */
extern PGDLLIMPORT char *default_table_access_method;
extern PGDLLIMPORT bool synchronize_seqscans;


struct BulkInsertStateData;
struct IndexInfo;
struct SampleScanState;
struct TBMIterateResult;
struct VacuumParams;
struct ValidateIndexState;

/*
 * Bitmask values for the flags argument to the scan_begin callback.
 */
typedef enum ScanOptions
{
    /* one of SO_TYPE_* may be specified */
    SO_TYPE_SEQSCAN = 1 << 0,
    SO_TYPE_BITMAPSCAN = 1 << 1,
    SO_TYPE_SAMPLESCAN = 1 << 2,
    SO_TYPE_TIDSCAN = 1 << 3,
    SO_TYPE_TIDRANGESCAN = 1 << 4,
    SO_TYPE_ANALYZE = 1 << 5,

    /* several of SO_ALLOW_* may be specified */
    /* allow or disallow use of access strategy */
    SO_ALLOW_STRAT = 1 << 6,
    /* report location to syncscan logic? */
    SO_ALLOW_SYNC = 1 << 7,
    /* verify visibility page-at-a-time? */
    SO_ALLOW_PAGEMODE = 1 << 8,

    /* unregister snapshot at scan end? */
    SO_TEMP_SNAPSHOT = 1 << 9
} ScanOptions;
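
/*
 * For illustration: callers compose exactly one SO_TYPE_* value with any
 * applicable modifier bits.  A minimal sketch, mirroring what
 * table_beginscan() below passes for a plain sequential scan:
 *
 *      uint32  flags = SO_TYPE_SEQSCAN |
 *          SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
 */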

/*
 * Result codes for table_{update,delete,lock_tuple}, and for visibility
 * routines inside table AMs.
 */
typedef enum TM_Result
{
    /*
     * Signals that the action succeeded (i.e. update/delete performed, lock
     * was acquired)
     */
    TM_Ok,

    /* The affected tuple wasn't visible to the relevant snapshot */
    TM_Invisible,

    /* The affected tuple was already modified by the calling backend */
    TM_SelfModified,

    /*
     * The affected tuple was updated by another transaction. This includes
     * the case where tuple was moved to another partition.
     */
    TM_Updated,

    /* The affected tuple was deleted by another transaction */
    TM_Deleted,

    /*
     * The affected tuple is currently being modified by another session.
     * This will only be returned if table_(update/delete/lock_tuple) are
     * instructed not to wait.
     */
    TM_BeingModified,

    /* lock couldn't be acquired, action skipped. Only used by lock_tuple */
    TM_WouldBlock
} TM_Result;

/*
 * Result codes for table_update(..., update_indexes*..).
 * Used to determine which indexes to update.
 */
typedef enum TU_UpdateIndexes
{
    /* No indexed columns were updated (incl. TID addressing of tuple) */
    TU_None,

    /* A non-summarizing indexed column was updated, or the TID has changed */
    TU_All,

    /* Only summarized columns were updated, TID is unchanged */
    TU_Summarizing
} TU_UpdateIndexes;

/*
 * When table_tuple_update, table_tuple_delete, or table_tuple_lock fail
 * because the target tuple is already outdated, they fill in this struct to
 * provide information to the caller about what happened.
 *
 * ctid is the target's ctid link: it is the same as the target's TID if the
 * target was deleted, or the location of the replacement tuple if the target
 * was updated.
 *
 * xmax is the outdating transaction's XID. If the caller wants to visit the
 * replacement tuple, it must check that this matches before believing the
 * replacement is really a match.
 *
 * cmax is the outdating command's CID, but only when the failure code is
 * TM_SelfModified (i.e., something in the current transaction outdated the
 * tuple); otherwise cmax is zero. (We make this restriction because
 * HeapTupleHeaderGetCmax doesn't work for tuples outdated in other
 * transactions.)
 */
typedef struct TM_FailureData
{
    ItemPointerData ctid;
    TransactionId xmax;
    CommandId   cmax;
    bool        traversed;
} TM_FailureData;
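
/*
 * Illustrative sketch only, assuming the table_tuple_delete() wrapper
 * (declared further down in this header) mirrors the tuple_delete callback's
 * parameter list, and that `tid`, `cid` and `snapshot` are set up by the
 * caller:
 *
 *      TM_FailureData tmfd;
 *      TM_Result   result;
 *
 *      result = table_tuple_delete(rel, tid, cid, snapshot, InvalidSnapshot,
 *                                  true, &tmfd, false);
 *      if (result == TM_Updated && !ItemPointerEquals(tid, &tmfd.ctid))
 *      {
 *          (the target was concurrently updated: tmfd.ctid points at the
 *          replacement tuple, and tmfd.xmax must match that tuple before
 *          the replacement can be believed to be a real match)
 *      }
 */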

/*
 * State used when calling table_index_delete_tuples().
 *
 * Represents the status of table tuples, referenced by table TID and taken by
 * index AM from index tuples. State consists of high level parameters of the
 * deletion operation, plus two mutable palloc()'d arrays for information
 * about the status of individual table tuples. These are conceptually one
 * single array. Using two arrays keeps the TM_IndexDelete struct small,
 * which makes sorting the first array (the deltids array) fast.
 *
 * Some index AM callers perform simple index tuple deletion (by specifying
 * bottomup = false), and include only known-dead deltids. These known-dead
 * entries are all marked knowndeletable = true directly (typically these are
 * TIDs from LP_DEAD-marked index tuples), but that isn't strictly required.
 *
 * Callers that specify bottomup = true are "bottom-up index deletion"
 * callers. The considerations for the tableam are more subtle with these
 * callers because they ask the tableam to perform highly speculative work,
 * and might only expect the tableam to check a small fraction of all entries.
 * Caller is not allowed to specify knowndeletable = true for any entry
 * because everything is highly speculative. Bottom-up caller provides
 * context and hints to tableam -- see comments below for details on how index
 * AMs and tableams should coordinate during bottom-up index deletion.
 *
 * Simple index deletion callers may ask the tableam to perform speculative
 * work, too. This is a little like bottom-up deletion, but not too much.
 * The tableam will only perform speculative work when it's practically free
 * to do so in passing for simple deletion caller (while always performing
 * whatever work is needed to enable knowndeletable/LP_DEAD index tuples to
 * be deleted within index AM). This is the real reason why it's possible for
 * simple index deletion caller to specify knowndeletable = false up front
 * (this means "check if it's possible for me to delete corresponding index
 * tuple when it's cheap to do so in passing"). The index AM should only
 * include "extra" entries for index tuples whose TIDs point to a table block
 * that tableam is expected to have to visit anyway (in the event of a block
 * oriented tableam). The tableam isn't strictly obligated to check these
 * "extra" TIDs, but a block-based AM should always manage to do so in
 * practice.
 *
 * The final contents of the deltids/status arrays are interesting to callers
 * that ask tableam to perform speculative work (i.e. when _any_ items have
 * knowndeletable set to false up front). These index AM callers will
 * naturally need to consult final state to determine which index tuples are
 * in fact deletable.
 *
 * The index AM can keep track of which index tuple relates to which deltid by
 * setting idxoffnum (and/or relying on each entry being uniquely identifiable
 * using tid), which is important when the final contents of the array will
 * need to be interpreted -- the array can shrink from initial size after
 * tableam processing and/or have entries in a new order (tableam may sort
 * deltids array for its own reasons). Bottom-up callers may find that final
 * ndeltids is 0 on return from call to tableam, in which case no index tuple
 * deletions are possible. Simple deletion callers can rely on any entries
 * they know to be deletable appearing in the final array as deletable.
 */
typedef struct TM_IndexDelete
{
    ItemPointerData tid;        /* table TID from index tuple */
    int16       id;             /* Offset into TM_IndexStatus array */
} TM_IndexDelete;

typedef struct TM_IndexStatus
{
    OffsetNumber idxoffnum;     /* Index AM page offset number */
    bool        knowndeletable; /* Currently known to be deletable? */

    /* Bottom-up index deletion specific fields follow */
    bool        promising;      /* Promising (duplicate) index tuple? */
    int16       freespace;      /* Space freed in index if deleted */
} TM_IndexStatus;

/*
 * Index AM/tableam coordination is central to the design of bottom-up index
 * deletion. The index AM provides hints about where to look to the tableam
 * by marking some entries as "promising". Index AM does this with duplicate
 * index tuples that are strongly suspected to be old versions left behind by
 * UPDATEs that did not logically modify indexed values. Index AM may find it
 * helpful to only mark entries as promising when they're thought to have been
 * affected by such an UPDATE in the recent past.
 *
 * Bottom-up index deletion casts a wide net at first, usually by including
 * all TIDs on a target index page. It is up to the tableam to worry about
 * the cost of checking transaction status information. The tableam is in
 * control, but needs careful guidance from the index AM. Index AM requests
 * that bottomupfreespace target be met, while tableam measures progress
 * towards that goal by tallying the per-entry freespace value for known
 * deletable entries. (All !bottomup callers can just set these space related
 * fields to zero.)
 */
typedef struct TM_IndexDeleteOp
{
    Relation    irel;           /* Target index relation */
    BlockNumber iblknum;        /* Index block number (for error reports) */
    bool        bottomup;       /* Bottom-up (not simple) deletion? */
    int         bottomupfreespace;  /* Bottom-up space target */

    /* Mutable per-TID information follows (index AM initializes entries) */
    int         ndeltids;       /* Current # of deltids/status elements */
    TM_IndexDelete *deltids;
    TM_IndexStatus *status;
} TM_IndexDeleteOp;
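
/*
 * A minimal sketch of how a simple-deletion caller might set up this state
 * (variable names are hypothetical; MaxIndexTuplesPerPage is assumed as an
 * upper bound on the number of deltids):
 *
 *      TM_IndexDeleteOp delstate;
 *
 *      delstate.irel = irel;
 *      delstate.iblknum = BufferGetBlockNumber(ibuf);
 *      delstate.bottomup = false;
 *      delstate.bottomupfreespace = 0;
 *      delstate.ndeltids = 0;
 *      delstate.deltids = palloc(MaxIndexTuplesPerPage * sizeof(TM_IndexDelete));
 *      delstate.status = palloc(MaxIndexTuplesPerPage * sizeof(TM_IndexStatus));
 *
 *      (append one deltids/status pair per LP_DEAD index tuple, setting
 *      knowndeletable = true and promising = false, incrementing ndeltids)
 *
 *      snapshotConflictHorizon = table_index_delete_tuples(hrel, &delstate);
 */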

/* "options" flag bits for table_tuple_insert */
/* TABLE_INSERT_SKIP_WAL was 0x0001; RelationNeedsWAL() now governs */
#define TABLE_INSERT_SKIP_FSM       0x0002
#define TABLE_INSERT_FROZEN         0x0004
#define TABLE_INSERT_NO_LOGICAL     0x0008

/* flag bits for table_tuple_lock */
/* Follow tuples whose update is in progress if lock modes don't conflict */
#define TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS (1 << 0)
/* Follow update chain and lock latest version of tuple */
#define TUPLE_LOCK_FLAG_FIND_LAST_VERSION       (1 << 1)


/* Typedef for callback function for table_index_build_scan */
typedef void (*IndexBuildCallback) (Relation index,
                                    ItemPointer tid,
                                    Datum *values,
                                    bool *isnull,
                                    bool tupleIsAlive,
                                    void *state);
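
/*
 * A sketch of the shape such a callback typically takes (ExampleBuildState
 * is a hypothetical per-build state struct owned by the index AM):
 *
 *      static void
 *      example_build_callback(Relation index, ItemPointer tid, Datum *values,
 *                             bool *isnull, bool tupleIsAlive, void *state)
 *      {
 *          ExampleBuildState *buildstate = (ExampleBuildState *) state;
 *
 *          (form an index tuple from values/isnull, associate it with tid,
 *          and accumulate it into buildstate)
 *      }
 */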

/*
 * API struct for a table AM. Note this must be allocated in a
 * server-lifetime manner, typically as a static const struct, which then gets
 * returned by FormData_pg_am.amhandler.
 *
 * In most cases it's not appropriate to call the callbacks directly; use the
 * table_* wrapper functions instead.
 *
 * GetTableAmRoutine() asserts that required callbacks are filled in; remember
 * to update it when adding a callback.
 */
typedef struct TableAmRoutine
{
    /* this must be set to T_TableAmRoutine */
    NodeTag     type;


    /* ------------------------------------------------------------------------
     * Slot related callbacks.
     * ------------------------------------------------------------------------
     */

    /*
     * Return slot implementation suitable for storing a tuple of this AM.
     */
    const TupleTableSlotOps *(*slot_callbacks) (Relation rel);


    /* ------------------------------------------------------------------------
     * Table scan callbacks.
     * ------------------------------------------------------------------------
     */

    /*
     * Start a scan of `rel`. The callback has to return a TableScanDesc,
     * which will typically be embedded in a larger, AM specific, struct.
     *
     * If nkeys != 0, the results need to be filtered by those scan keys.
     *
     * pscan, if not NULL, will have already been initialized with
     * parallelscan_initialize(), and has to be for the same relation. Will
     * only be set coming from table_beginscan_parallel().
     *
     * `flags` is a bitmask indicating the type of scan (ScanOptions's
     * SO_TYPE_*, currently only one may be specified), options controlling
     * the scan's behaviour (ScanOptions's SO_ALLOW_*, several may be
     * specified, an AM may ignore unsupported ones) and whether the snapshot
     * needs to be deallocated at scan_end (ScanOptions's SO_TEMP_SNAPSHOT).
     */
    TableScanDesc (*scan_begin) (Relation rel,
                                 Snapshot snapshot,
                                 int nkeys, struct ScanKeyData *key,
                                 ParallelTableScanDesc pscan,
                                 uint32 flags);

    /*
     * Release resources and deallocate scan. If TableScanDesc.temp_snap,
     * TableScanDesc.rs_snapshot needs to be unregistered.
     */
    void        (*scan_end) (TableScanDesc scan);

    /*
     * Restart relation scan. If set_params is set to true, allow_{strat,
     * sync, pagemode} (see scan_begin) changes should be taken into account.
     */
    void        (*scan_rescan) (TableScanDesc scan, struct ScanKeyData *key,
                                bool set_params, bool allow_strat,
                                bool allow_sync, bool allow_pagemode);

    /*
     * Return next tuple from `scan`, store in slot.
     */
    bool        (*scan_getnextslot) (TableScanDesc scan,
                                     ScanDirection direction,
                                     TupleTableSlot *slot);

    /*-----------
     * Optional functions to provide scanning for ranges of ItemPointers.
     * Implementations must either provide both of these functions, or neither
     * of them.
     *
     * Implementations of scan_set_tidrange must themselves handle
     * ItemPointers of any value. I.e., they must handle each of the following:
     *
     * 1) mintid or maxtid is beyond the end of the table; and
     * 2) mintid is above maxtid; and
     * 3) item offset for mintid or maxtid is beyond the maximum offset
     *    allowed by the AM.
     *
     * Implementations can assume that scan_set_tidrange is always called
     * before scan_getnextslot_tidrange or after scan_rescan and before any
     * further calls to scan_getnextslot_tidrange.
     */
    void        (*scan_set_tidrange) (TableScanDesc scan,
                                      ItemPointer mintid,
                                      ItemPointer maxtid);

    /*
     * Return next tuple from `scan` that's in the range of TIDs defined by
     * scan_set_tidrange.
     */
    bool        (*scan_getnextslot_tidrange) (TableScanDesc scan,
                                              ScanDirection direction,
                                              TupleTableSlot *slot);

    /* ------------------------------------------------------------------------
     * Parallel table scan related functions.
     * ------------------------------------------------------------------------
     */

    /*
     * Estimate the size of shared memory needed for a parallel scan of this
     * relation. The snapshot does not need to be accounted for.
     */
    Size        (*parallelscan_estimate) (Relation rel);

    /*
     * Initialize ParallelTableScanDesc for a parallel scan of this relation.
     * `pscan` will be sized according to parallelscan_estimate() for the same
     * relation.
     */
    Size        (*parallelscan_initialize) (Relation rel,
                                            ParallelTableScanDesc pscan);

    /*
     * Reinitialize `pscan` for a new scan. `rel` will be the same relation as
     * when `pscan` was initialized by parallelscan_initialize.
     */
    void        (*parallelscan_reinitialize) (Relation rel,
                                              ParallelTableScanDesc pscan);


    /* ------------------------------------------------------------------------
     * Index Scan Callbacks
     * ------------------------------------------------------------------------
     */

    /*
     * Prepare to fetch tuples from the relation, as needed when fetching
     * tuples for an index scan. The callback has to return an
     * IndexFetchTableData, which the AM will typically embed in a larger
     * structure with additional information.
     *
     * Tuples for an index scan can then be fetched via index_fetch_tuple.
     */
    struct IndexFetchTableData *(*index_fetch_begin) (Relation rel);

    /*
     * Reset index fetch. Typically this will release cross index fetch
     * resources held in IndexFetchTableData.
     */
    void        (*index_fetch_reset) (struct IndexFetchTableData *data);

    /*
     * Release resources and deallocate index fetch.
     */
    void        (*index_fetch_end) (struct IndexFetchTableData *data);

    /*
     * Fetch tuple at `tid` into `slot`, after doing a visibility test
     * according to `snapshot`. If a tuple was found and passed the visibility
     * test, return true, false otherwise.
     *
     * Note that AMs that do not necessarily update indexes when indexed
     * columns do not change need to return the current/correct version of
     * the tuple that is visible to the snapshot, even if the tid points to an
     * older version of the tuple.
     *
     * *call_again is false on the first call to index_fetch_tuple for a tid.
     * If there potentially is another tuple matching the tid, *call_again
     * needs to be set to true by index_fetch_tuple, signaling to the caller
     * that index_fetch_tuple should be called again for the same tid.
     *
     * *all_dead, if all_dead is not NULL, should be set to true by
     * index_fetch_tuple iff it is guaranteed that no backend needs to see
     * that tuple. Index AMs can use that to avoid returning that tid in
     * future searches.
     */
    bool        (*index_fetch_tuple) (struct IndexFetchTableData *scan,
                                      ItemPointer tid,
                                      Snapshot snapshot,
                                      TupleTableSlot *slot,
                                      bool *call_again, bool *all_dead);


    /* ------------------------------------------------------------------------
     * Callbacks for non-modifying operations on individual tuples
     * ------------------------------------------------------------------------
     */

    /*
     * Fetch tuple at `tid` into `slot`, after doing a visibility test
     * according to `snapshot`. If a tuple was found and passed the visibility
     * test, returns true, false otherwise.
     */
    bool        (*tuple_fetch_row_version) (Relation rel,
                                            ItemPointer tid,
                                            Snapshot snapshot,
                                            TupleTableSlot *slot);

    /*
     * Is tid valid for a scan of this relation.
     */
    bool        (*tuple_tid_valid) (TableScanDesc scan,
                                    ItemPointer tid);

    /*
     * Return the latest version of the tuple at `tid`, by updating `tid` to
     * point at the newest version.
     */
    void        (*tuple_get_latest_tid) (TableScanDesc scan,
                                         ItemPointer tid);

    /*
     * Does the tuple in `slot` satisfy `snapshot`? The slot needs to be of
     * the appropriate type for the AM.
     */
    bool        (*tuple_satisfies_snapshot) (Relation rel,
                                             TupleTableSlot *slot,
                                             Snapshot snapshot);

    /* see table_index_delete_tuples() */
    TransactionId (*index_delete_tuples) (Relation rel,
                                          TM_IndexDeleteOp *delstate);


    /* ------------------------------------------------------------------------
     * Manipulations of physical tuples.
     * ------------------------------------------------------------------------
     */

    /* see table_tuple_insert() for reference about parameters */
    void        (*tuple_insert) (Relation rel, TupleTableSlot *slot,
                                 CommandId cid, int options,
                                 struct BulkInsertStateData *bistate);

    /* see table_tuple_insert_speculative() for reference about parameters */
    void        (*tuple_insert_speculative) (Relation rel,
                                             TupleTableSlot *slot,
                                             CommandId cid,
                                             int options,
                                             struct BulkInsertStateData *bistate,
                                             uint32 specToken);

    /* see table_tuple_complete_speculative() for reference about parameters */
    void        (*tuple_complete_speculative) (Relation rel,
                                               TupleTableSlot *slot,
                                               uint32 specToken,
                                               bool succeeded);

    /* see table_multi_insert() for reference about parameters */
    void        (*multi_insert) (Relation rel, TupleTableSlot **slots, int nslots,
                                 CommandId cid, int options,
                                 struct BulkInsertStateData *bistate);

    /* see table_tuple_delete() for reference about parameters */
    TM_Result   (*tuple_delete) (Relation rel,
                                 ItemPointer tid,
                                 CommandId cid,
                                 Snapshot snapshot,
                                 Snapshot crosscheck,
                                 bool wait,
                                 TM_FailureData *tmfd,
                                 bool changingPart);

    /* see table_tuple_update() for reference about parameters */
    TM_Result   (*tuple_update) (Relation rel,
                                 ItemPointer otid,
                                 TupleTableSlot *slot,
                                 CommandId cid,
                                 Snapshot snapshot,
                                 Snapshot crosscheck,
                                 bool wait,
                                 TM_FailureData *tmfd,
                                 LockTupleMode *lockmode,
                                 TU_UpdateIndexes *update_indexes);

    /* see table_tuple_lock() for reference about parameters */
    TM_Result   (*tuple_lock) (Relation rel,
                               ItemPointer tid,
                               Snapshot snapshot,
                               TupleTableSlot *slot,
                               CommandId cid,
                               LockTupleMode mode,
                               LockWaitPolicy wait_policy,
                               uint8 flags,
                               TM_FailureData *tmfd);

    /*
     * Perform operations necessary to complete insertions made via
     * tuple_insert and multi_insert with a BulkInsertState specified.
     * In-tree access methods no longer use this.
     *
     * Typically callers of tuple_insert and multi_insert will just pass all
     * the flags that apply to them; each AM has to decide which of them make
     * sense for it, act on only those flags in finish_bulk_insert, and
     * ignore the rest.
     *
     * Optional callback.
     */
    void        (*finish_bulk_insert) (Relation rel, int options);


    /* ------------------------------------------------------------------------
     * DDL related functionality.
     * ------------------------------------------------------------------------
     */

    /*
     * This callback needs to create new relation storage for `rel`, with
     * appropriate durability behaviour for `persistence`.
     *
     * Note that only the subset of the relcache filled by
     * RelationBuildLocalRelation() can be relied upon and that the relation's
     * catalog entries will either not yet exist (new relation), or will still
     * reference the old relfilelocator.
     *
     * As output *freezeXid, *minmulti must be set to the values appropriate
     * for pg_class.{relfrozenxid, relminmxid}. For AMs that don't need those
     * fields to be filled they can be set to InvalidTransactionId and
     * InvalidMultiXactId, respectively.
     *
     * See also table_relation_set_new_filelocator().
     */
    void        (*relation_set_new_filelocator) (Relation rel,
                                                 const RelFileLocator *newrlocator,
                                                 char persistence,
                                                 TransactionId *freezeXid,
                                                 MultiXactId *minmulti);

    /*
     * This callback needs to remove all contents from `rel`'s current
     * relfilelocator. No provisions for transactional behaviour need to be
     * made. Often this can be implemented by truncating the underlying
     * storage to its minimal size.
     *
     * See also table_relation_nontransactional_truncate().
     */
    void        (*relation_nontransactional_truncate) (Relation rel);

    /*
     * See table_relation_copy_data().
     *
     * This can typically be implemented by directly copying the underlying
     * storage, unless it contains references to the tablespace internally.
     */
    void        (*relation_copy_data) (Relation rel,
                                       const RelFileLocator *newrlocator);

    /* See table_relation_copy_for_cluster() */
    void        (*relation_copy_for_cluster) (Relation NewTable,
                                              Relation OldTable,
                                              Relation OldIndex,
                                              bool use_sort,
                                              TransactionId OldestXmin,
                                              TransactionId *xid_cutoff,
                                              MultiXactId *multi_cutoff,
                                              double *num_tuples,
                                              double *tups_vacuumed,
                                              double *tups_recently_dead);

    /*
     * React to VACUUM command on the relation. The VACUUM can be triggered by
     * a user or by autovacuum. The specific actions performed by the AM will
     * depend heavily on the individual AM.
     *
     * On entry a transaction is already established, and the relation is
     * locked with a ShareUpdateExclusive lock.
     *
     * Note that neither VACUUM FULL (nor CLUSTER) nor ANALYZE goes through
     * this routine, even if (for ANALYZE) it is part of the same VACUUM
     * command.
     *
     * In the future there probably needs to be a separate callback to
     * integrate with autovacuum's scheduling.
     */
    void        (*relation_vacuum) (Relation rel,
                                    struct VacuumParams *params,
                                    BufferAccessStrategy bstrategy);

    /*
     * Prepare to analyze block `blockno` of `scan`. The scan has been started
     * with table_beginscan_analyze(). See also
     * table_scan_analyze_next_block().
     *
     * The callback may acquire resources like locks that are held until
     * table_scan_analyze_next_tuple() returns false. For example, it can make
     * sense to hold a lock until all tuples on a block have been analyzed by
     * scan_analyze_next_tuple.
     *
     * The callback can return false if the block is not suitable for
     * sampling, e.g. because it's a metapage that could never contain tuples.
     *
     * XXX: This obviously is primarily suited for block-based AMs. It's not
     * clear what a good interface for non block based AMs would be, so there
     * isn't one yet.
     */
    bool        (*scan_analyze_next_block) (TableScanDesc scan,
                                            BlockNumber blockno,
                                            BufferAccessStrategy bstrategy);

    /*
     * See table_scan_analyze_next_tuple().
     *
     * Not every AM might have a meaningful concept of dead rows, in which
     * case it's OK to not increment *deadrows - but note that this may
     * influence autovacuum scheduling (see comment for the relation_vacuum
     * callback).
     */
    bool        (*scan_analyze_next_tuple) (TableScanDesc scan,
                                            TransactionId OldestXmin,
                                            double *liverows,
                                            double *deadrows,
                                            TupleTableSlot *slot);

    /* see table_index_build_range_scan for reference about parameters */
    double      (*index_build_range_scan) (Relation table_rel,
                                           Relation index_rel,
                                           struct IndexInfo *index_info,
                                           bool allow_sync,
                                           bool anyvisible,
                                           bool progress,
                                           BlockNumber start_blockno,
                                           BlockNumber numblocks,
                                           IndexBuildCallback callback,
                                           void *callback_state,
                                           TableScanDesc scan);

    /* see table_index_validate_scan for reference about parameters */
    void        (*index_validate_scan) (Relation table_rel,
                                        Relation index_rel,
                                        struct IndexInfo *index_info,
                                        Snapshot snapshot,
                                        struct ValidateIndexState *state);


    /* ------------------------------------------------------------------------
     * Miscellaneous functions.
     * ------------------------------------------------------------------------
     */

    /*
     * See table_relation_size().
     *
     * Note that currently a few callers use the MAIN_FORKNUM size to figure
     * out the range of potentially interesting blocks (brin, analyze). It's
     * probable that we'll need to revise the interface for those at some
     * point.
     */
    uint64      (*relation_size) (Relation rel, ForkNumber forkNumber);


    /*
     * This callback should return true if the relation requires a TOAST table
     * and false if it does not. It may wish to examine the relation's tuple
     * descriptor before making a decision, but if it uses some other method
     * of storing large values (or if it does not support them) it can simply
     * return false.
     */
    bool        (*relation_needs_toast_table) (Relation rel);

    /*
     * This callback should return the OID of the table AM that implements
     * TOAST tables for this AM. If the relation_needs_toast_table callback
     * always returns false, this callback is not required.
     */
    Oid         (*relation_toast_am) (Relation rel);

    /*
     * This callback is invoked when detoasting a value stored in a toast
     * table implemented by this AM. See table_relation_fetch_toast_slice()
     * for more details.
     */
    void        (*relation_fetch_toast_slice) (Relation toastrel, Oid valueid,
                                               int32 attrsize,
                                               int32 sliceoffset,
                                               int32 slicelength,
                                               struct varlena *result);


    /* ------------------------------------------------------------------------
     * Planner related functions.
     * ------------------------------------------------------------------------
     */

    /*
     * See table_relation_estimate_size().
     *
     * While the interface is block oriented, it shouldn't be too hard for an
     * AM that doesn't internally use blocks to convert into a usable
     * representation.
     *
     * This differs from the relation_size callback by returning size
     * estimates (both relation size and tuple count) for planning purposes,
     * rather than returning a currently correct value.
     */
    void        (*relation_estimate_size) (Relation rel, int32 *attr_widths,
                                           BlockNumber *pages, double *tuples,
                                           double *allvisfrac);


    /* ------------------------------------------------------------------------
     * Executor related functions.
     * ------------------------------------------------------------------------
     */

    /*
     * Prepare to fetch / check / return tuples from `tbmres->blockno` as part
     * of a bitmap table scan. `scan` was started via table_beginscan_bm().
     * Return false if there are no tuples to be found on the page, true
     * otherwise.
     *
     * This will typically read and pin the target block, and do the necessary
     * work to allow scan_bitmap_next_tuple() to return tuples (e.g. it might
     * make sense to perform tuple visibility checks at this time). For some
     * AMs it will make more sense to do all the work referencing `tbmres`
     * contents here, for others it might be better to defer more work to
     * scan_bitmap_next_tuple.
     *
     * If `tbmres->ntuples` is -1, the page is lossy and all visible tuples
     * on the page have to be returned, otherwise the tuples at offsets in
     * `tbmres->offsets` need to be returned.
     *
     * XXX: Currently this may only be implemented if the AM uses md.c as its
     * storage manager, and uses ItemPointer->ip_blkid in a manner that maps
     * blockids directly to the underlying storage. nodeBitmapHeapscan.c
     * performs prefetching directly using that interface. This probably
     * needs to be rectified at a later point.
     *
     * XXX: Currently this may only be implemented if the AM uses the
     * visibilitymap, as nodeBitmapHeapscan.c unconditionally accesses it to
     * perform prefetching. This probably needs to be rectified at a later
     * point.
     *
     * Optional callback, but either both scan_bitmap_next_block and
     * scan_bitmap_next_tuple need to exist, or neither.
     */
    bool        (*scan_bitmap_next_block) (TableScanDesc scan,
                                           struct TBMIterateResult *tbmres);

    /*
     * Fetch the next tuple of a bitmap table scan into `slot` and return true
     * if a visible tuple was found, false otherwise.
     *
     * For some AMs it will make more sense to do all the work referencing
     * `tbmres` contents in scan_bitmap_next_block, for others it might be
     * better to defer more work to this callback.
     *
     * Optional callback, but either both scan_bitmap_next_block and
     * scan_bitmap_next_tuple need to exist, or neither.
     */
    bool        (*scan_bitmap_next_tuple) (TableScanDesc scan,
                                           struct TBMIterateResult *tbmres,
                                           TupleTableSlot *slot);

    /*
     * Prepare to fetch tuples from the next block in a sample scan. Return
     * false if the sample scan is finished, true otherwise. `scan` was
     * started via table_beginscan_sampling().
     *
     * Typically this will first determine the target block by calling the
     * TsmRoutine's NextSampleBlock() callback if not NULL, or alternatively
     * perform a sequential scan over all blocks. The determined block is
     * then typically read and pinned.
     *
     * As the TsmRoutine interface is block based, a block needs to be passed
     * to NextSampleBlock(). If that's not appropriate for an AM, it
     * internally needs to perform mapping between the internal and a block
     * based representation.
     *
     * Note that it's not acceptable to hold deadlock prone resources such as
     * lwlocks until scan_sample_next_tuple() has exhausted the tuples on the
     * block - the tuple is likely to be returned to an upper query node, and
     * the next call could be off a long while. Holding buffer pins and such
     * is obviously OK.
     *
     * Currently it is required to implement this interface, as there's no
     * alternative way (contrary e.g. to bitmap scans) to implement sample
     * scans. If infeasible to implement, the AM may raise an error.
     */
    bool        (*scan_sample_next_block) (TableScanDesc scan,
                                           struct SampleScanState *scanstate);

    /*
     * This callback, only called after scan_sample_next_block has returned
     * true, should determine the next tuple to be returned from the selected
     * block using the TsmRoutine's NextSampleTuple() callback.
     *
     * The callback needs to perform visibility checks, and only return
     * visible tuples. That obviously can mean calling NextSampleTuple()
     * multiple times.
     *
     * The TsmRoutine interface assumes that there's a maximum offset on a
     * given page, so if that doesn't apply to an AM, it needs to emulate that
     * assumption somehow.
     */
    bool        (*scan_sample_next_tuple) (TableScanDesc scan,
                                           struct SampleScanState *scanstate,
                                           TupleTableSlot *slot);

} TableAmRoutine;
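
/*
 * A minimal sketch of how an AM exposes its TableAmRoutine, following the
 * "static const struct returned by the amhandler" pattern described above
 * (all names here are hypothetical):
 *
 *      static const TableAmRoutine example_methods = {
 *          .type = T_TableAmRoutine,
 *          .slot_callbacks = example_slot_callbacks,
 *          .scan_begin = example_scan_begin,
 *          (... all other required callbacks ...)
 *      };
 *
 *      Datum
 *      example_tableam_handler(PG_FUNCTION_ARGS)
 *      {
 *          PG_RETURN_POINTER(&example_methods);
 *      }
 */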


/* ----------------------------------------------------------------------------
 * Slot functions.
 * ----------------------------------------------------------------------------
 */

/*
 * Returns slot callbacks suitable for holding tuples of the appropriate type
 * for the relation. Works for tables, views, foreign tables and partitioned
 * tables.
 */
extern const TupleTableSlotOps *table_slot_callbacks(Relation relation);

/*
 * Returns slot using the callbacks returned by table_slot_callbacks(), and
 * registers it on *reglist.
 */
extern TupleTableSlot *table_slot_create(Relation relation, List **reglist);


/* ----------------------------------------------------------------------------
 * Table scan functions.
 * ----------------------------------------------------------------------------
 */

/*
 * Start a scan of `rel`. Returned tuples pass a visibility test of
 * `snapshot`, and if nkeys != 0, the results are filtered by those scan keys.
 */
static inline TableScanDesc
table_beginscan(Relation rel, Snapshot snapshot,
                int nkeys, struct ScanKeyData *key)
{
    uint32      flags = SO_TYPE_SEQSCAN |
        SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;

    return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}
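
/*
 * For illustration, a minimal caller-side loop (assuming `rel` is open and
 * suitably locked, and `snapshot` stays valid for the scan's lifetime):
 *
 *      TableScanDesc scan = table_beginscan(rel, snapshot, 0, NULL);
 *      TupleTableSlot *slot = table_slot_create(rel, NULL);
 *
 *      while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
 *      {
 *          (process the tuple in slot)
 *      }
 *
 *      ExecDropSingleTupleTableSlot(slot);
 *      table_endscan(scan);
 */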

/*
 * Like table_beginscan(), but for scanning catalog. It'll automatically use a
 * snapshot appropriate for scanning catalog relations.
 */
extern TableScanDesc table_beginscan_catalog(Relation relation, int nkeys,
                                             struct ScanKeyData *key);

/*
 * Like table_beginscan(), but table_beginscan_strat() offers an extended API
 * that lets the caller control whether a nondefault buffer access strategy
 * can be used, and whether syncscan can be chosen (possibly resulting in the
 * scan not starting from block zero). Both of these default to true with
 * plain table_beginscan.
 */
static inline TableScanDesc
table_beginscan_strat(Relation rel, Snapshot snapshot,
                      int nkeys, struct ScanKeyData *key,
                      bool allow_strat, bool allow_sync)
{
    uint32      flags = SO_TYPE_SEQSCAN | SO_ALLOW_PAGEMODE;

    if (allow_strat)
        flags |= SO_ALLOW_STRAT;
    if (allow_sync)
        flags |= SO_ALLOW_SYNC;

    return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}

/*
 * table_beginscan_bm is an alternative entry point for setting up a
 * TableScanDesc for a bitmap heap scan. Although that scan technology is
 * really quite unlike a standard seqscan, there is just enough commonality to
 * make it worth using the same data structure.
 */
static inline TableScanDesc
table_beginscan_bm(Relation rel, Snapshot snapshot,
                   int nkeys, struct ScanKeyData *key)
{
    uint32      flags = SO_TYPE_BITMAPSCAN | SO_ALLOW_PAGEMODE;

    return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}

/*
 * table_beginscan_sampling is an alternative entry point for setting up a
 * TableScanDesc for a TABLESAMPLE scan. As with bitmap scans, it's worth
 * using the same data structure although the behavior is rather different.
 * In addition to the options offered by table_beginscan_strat, this call
 * also allows control of whether page-mode visibility checking is used.
 */
static inline TableScanDesc
table_beginscan_sampling(Relation rel, Snapshot snapshot,
                         int nkeys, struct ScanKeyData *key,
                         bool allow_strat, bool allow_sync,
                         bool allow_pagemode)
{
    uint32      flags = SO_TYPE_SAMPLESCAN;

    if (allow_strat)
        flags |= SO_ALLOW_STRAT;
    if (allow_sync)
        flags |= SO_ALLOW_SYNC;
    if (allow_pagemode)
        flags |= SO_ALLOW_PAGEMODE;

    return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}

/*
 * table_beginscan_tid is an alternative entry point for setting up a
 * TableScanDesc for a Tid scan. As with bitmap scans, it's worth using
 * the same data structure although the behavior is rather different.
 */
static inline TableScanDesc
table_beginscan_tid(Relation rel, Snapshot snapshot)
{
    uint32      flags = SO_TYPE_TIDSCAN;

    return rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags);
}

/*
 * table_beginscan_analyze is an alternative entry point for setting up a
 * TableScanDesc for an ANALYZE scan. As with bitmap scans, it's worth using
 * the same data structure although the behavior is rather different.
 */
static inline TableScanDesc
table_beginscan_analyze(Relation rel)
{
    uint32      flags = SO_TYPE_ANALYZE;

    return rel->rd_tableam->scan_begin(rel, NULL, 0, NULL, NULL, flags);
}

/*
 * End relation scan.
 */
static inline void
table_endscan(TableScanDesc scan)
{
    scan->rs_rd->rd_tableam->scan_end(scan);
}

/*
 * Restart a relation scan.
 */
static inline void
table_rescan(TableScanDesc scan,
             struct ScanKeyData *key)
{
    scan->rs_rd->rd_tableam->scan_rescan(scan, key, false, false, false, false);
}

/*
 * Restart a relation scan after changing params.
 *
 * This call allows changing the buffer strategy, syncscan, and pagemode
 * options before starting a fresh scan. Note that although the actual use of
 * syncscan might change (effectively, enabling or disabling reporting), the
 * previously selected startblock will be kept.
 */
static inline void
table_rescan_set_params(TableScanDesc scan, struct ScanKeyData *key,
                        bool allow_strat, bool allow_sync, bool allow_pagemode)
{
    scan->rs_rd->rd_tableam->scan_rescan(scan, key, true,
                                         allow_strat, allow_sync,
                                         allow_pagemode);
}

/*
 * Update snapshot used by the scan.
 */
extern void table_scan_update_snapshot(TableScanDesc scan, Snapshot snapshot);

/*
 * Return next tuple from `scan`, store in slot.
 */
static inline bool
table_scan_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
{
    slot->tts_tableOid = RelationGetRelid(sscan->rs_rd);

    /* We don't expect actual scans using NoMovementScanDirection */
    Assert(direction == ForwardScanDirection ||
           direction == BackwardScanDirection);

    /*
     * We don't expect direct calls to table_scan_getnextslot with valid
     * CheckXidAlive for catalog or regular tables. See detailed comments in
     * xact.c where these variables are declared.
     */
    if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
        elog(ERROR, "unexpected table_scan_getnextslot call during logical decoding");

    return sscan->rs_rd->rd_tableam->scan_getnextslot(sscan, direction, slot);
}

/* ----------------------------------------------------------------------------
 * TID Range scanning related functions.
 * ----------------------------------------------------------------------------
 */

/*
 * table_beginscan_tidrange is the entry point for setting up a TableScanDesc
 * for a TID range scan.
 */
static inline TableScanDesc
table_beginscan_tidrange(Relation rel, Snapshot snapshot,
                         ItemPointer mintid,
                         ItemPointer maxtid)
{
    TableScanDesc sscan;
    uint32      flags = SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE;

    sscan = rel->rd_tableam->scan_begin(rel, snapshot, 0, NULL, NULL, flags);

    /* Set the range of TIDs to scan */
    sscan->rs_rd->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid);

    return sscan;
}

/*
 * table_rescan_tidrange resets the scan position and sets the minimum and
 * maximum TID range to scan for a TableScanDesc created by
 * table_beginscan_tidrange.
 */
static inline void
table_rescan_tidrange(TableScanDesc sscan, ItemPointer mintid,
                      ItemPointer maxtid)
{
    /* Ensure table_beginscan_tidrange() was used. */
    Assert((sscan->rs_flags & SO_TYPE_TIDRANGESCAN) != 0);

    sscan->rs_rd->rd_tableam->scan_rescan(sscan, NULL, false, false, false, false);
    sscan->rs_rd->rd_tableam->scan_set_tidrange(sscan, mintid, maxtid);
}

/*
 * Fetch the next tuple from `sscan` for a TID range scan created by
 * table_beginscan_tidrange(). Stores the tuple in `slot` and returns true,
 * or returns false if no more tuples exist in the range.
 */
static inline bool
table_scan_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
                                TupleTableSlot *slot)
{
    /* Ensure table_beginscan_tidrange() was used. */
    Assert((sscan->rs_flags & SO_TYPE_TIDRANGESCAN) != 0);

    /* We don't expect actual scans using NoMovementScanDirection */
    Assert(direction == ForwardScanDirection ||
           direction == BackwardScanDirection);

    return sscan->rs_rd->rd_tableam->scan_getnextslot_tidrange(sscan,
                                                               direction,
                                                               slot);
}
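
/*
 * For illustration, the expected calling pattern (a sketch assuming the
 * caller has set up mintid/maxtid and created `slot` with
 * table_slot_create()):
 *
 *      TableScanDesc scan = table_beginscan_tidrange(rel, snapshot,
 *                                                    &mintid, &maxtid);
 *
 *      while (table_scan_getnextslot_tidrange(scan, ForwardScanDirection,
 *                                             slot))
 *      {
 *          (process the tuple in slot)
 *      }
 *      table_endscan(scan);
 */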


/* ----------------------------------------------------------------------------
 * Parallel table scan related functions.
 * ----------------------------------------------------------------------------
 */

/*
 * Estimate the size of shared memory needed for a parallel scan of this
 * relation.
 */
extern Size table_parallelscan_estimate(Relation rel, Snapshot snapshot);

/*
 * Initialize ParallelTableScanDesc for a parallel scan of this
 * relation. `pscan` needs to be sized according to parallelscan_estimate()
 * for the same relation. Call this just once in the leader process; then,
 * individual workers attach via table_beginscan_parallel.
 */
extern void table_parallelscan_initialize(Relation rel,
                                          ParallelTableScanDesc pscan,
                                          Snapshot snapshot);

/*
 * Begin a parallel scan. `pscan` needs to have been initialized with
 * table_parallelscan_initialize(), for the same relation. The initialization
 * does not need to have happened in this backend.
 *
 * Caller must hold a suitable lock on the relation.
 */
extern TableScanDesc table_beginscan_parallel(Relation relation,
                                              ParallelTableScanDesc pscan);

/*
 * Restart a parallel scan. Call this in the leader process. Caller is
 * responsible for making sure that all workers have finished the scan
 * beforehand.
 */
static inline void
table_parallelscan_reinitialize(Relation rel, ParallelTableScanDesc pscan)
{
    rel->rd_tableam->parallelscan_reinitialize(rel, pscan);
}


/* ----------------------------------------------------------------------------
 * Index scan related functions.
 * ----------------------------------------------------------------------------
 */

/*
 * Prepare to fetch tuples from the relation, as needed when fetching tuples
 * for an index scan.
 *
 * Tuples for an index scan can then be fetched via table_index_fetch_tuple().
 */
static inline IndexFetchTableData *
table_index_fetch_begin(Relation rel)
{
    return rel->rd_tableam->index_fetch_begin(rel);
}

/*
 * Reset index fetch. Typically this will release cross index fetch resources
 * held in IndexFetchTableData.
 */
static inline void
table_index_fetch_reset(struct IndexFetchTableData *scan)
{
    scan->rel->rd_tableam->index_fetch_reset(scan);
}

/*
 * Release resources and deallocate index fetch.
 */
static inline void
table_index_fetch_end(struct IndexFetchTableData *scan)
{
    scan->rel->rd_tableam->index_fetch_end(scan);
}

/*
 * Fetches, as part of an index scan, tuple at `tid` into `slot`, after doing
 * a visibility test according to `snapshot`. If a tuple was found and passed
 * the visibility test, returns true, false otherwise. Note that *tid may be
 * modified when we return true (see later remarks on multiple row versions
 * reachable via a single index entry).
 *
 * *call_again needs to be false on the first call to
 * table_index_fetch_tuple() for a tid. If there potentially is another tuple
 * matching the tid, *call_again will be set to true, signaling that
 * table_index_fetch_tuple() should be called again for the same tid.
 *
 * *all_dead, if all_dead is not NULL, will be set to true by
 * table_index_fetch_tuple() iff it is guaranteed that no backend needs to see
 * that tuple. Index AMs can use that to avoid returning that tid in future
 * searches.
 *
 * The difference between this function and table_tuple_fetch_row_version()
 * is that this function returns the currently visible version of a row if
 * the AM supports storing multiple row versions reachable via a single index
 * entry (like heap's HOT), whereas table_tuple_fetch_row_version() only
 * evaluates the tuple exactly at `tid`. Outside of index entry -> table
 * tuple lookups, table_tuple_fetch_row_version() is what's usually needed.
 */
static inline bool
table_index_fetch_tuple(struct IndexFetchTableData *scan,
                        ItemPointer tid,
                        Snapshot snapshot,
                        TupleTableSlot *slot,
                        bool *call_again, bool *all_dead)
{
    /*
     * We don't expect direct calls to table_index_fetch_tuple with valid
     * CheckXidAlive for catalog or regular tables. See detailed comments in
     * xact.c where these variables are declared.
     */
    if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
        elog(ERROR, "unexpected table_index_fetch_tuple call during logical decoding");

    return scan->rel->rd_tableam->index_fetch_tuple(scan, tid, snapshot,
                                                    slot, call_again,
                                                    all_dead);
}
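
/*
 * For illustration, the call_again protocol from the caller's side: keep
 * re-fetching for the same tid until a visible tuple is found or no further
 * versions remain (a sketch; `scan` comes from table_index_fetch_begin()):
 *
 *      bool    call_again = false;
 *      bool    all_dead = false;
 *      bool    found;
 *
 *      do
 *      {
 *          found = table_index_fetch_tuple(scan, tid, snapshot, slot,
 *                                          &call_again, &all_dead);
 *      } while (!found && call_again);
 */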
1254 :
1255 : /*
1256 : * This is a convenience wrapper around table_index_fetch_tuple() which
1257 : * returns whether there are table tuple items corresponding to an index
1258 : * entry. This likely is only useful to verify if there's a conflict in a
1259 : * unique index.
1476 andres 1260 ECB : */
1261 : extern bool table_index_fetch_tuple_check(Relation rel,
1262 : ItemPointer tid,
1263 : Snapshot snapshot,
1264 : bool *all_dead);
1265 :
1266 :
1267 : /* ------------------------------------------------------------------------
1268 : * Functions for non-modifying operations on individual tuples
1269 : * ------------------------------------------------------------------------
1270 : */
1495 1271 :
1476 andres 1272 EUB :
1273 : /*
1462 andres 1274 ECB : * Fetch tuple at `tid` into `slot`, after doing a visibility test according to
1275 : * `snapshot`. If a tuple was found and passed the visibility test, returns
1276 : * true, false otherwise.
1277 : *
1278 : * See table_index_fetch_tuple's comment about what the difference between
1279 : * these functions is. It is correct to use this function outside of index
1280 : * entry->table tuple lookups.
1281 : */
1282 : static inline bool
1417 andres 1283 GIC 195976 : table_tuple_fetch_row_version(Relation rel,
1284 : ItemPointer tid,
1285 : Snapshot snapshot,
1286 : TupleTableSlot *slot)
1287 : {
1288 : /*
1289 : * We don't expect direct calls to table_tuple_fetch_row_version with
1290 : * valid CheckXidAlive for catalog or regular tables. See detailed
1291 : * comments in xact.c where these variables are declared.
1292 : */
974 akapila 1293 195976 : if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
974 akapila 1294 UIC 0 : elog(ERROR, "unexpected table_tuple_fetch_row_version call during logical decoding");
1295 :
1476 andres 1296 GIC 195976 : return rel->rd_tableam->tuple_fetch_row_version(rel, tid, snapshot, slot);
1297 : }
1298 :
1299 : /*
1300 : * Verify that `tid` is a potentially valid tuple identifier. That doesn't
1301 : * mean that the pointed to row needs to exist or be visible, but that
1302 : * attempting to fetch the row (e.g. with table_tuple_get_latest_tid() or
1303 : * table_tuple_fetch_row_version()) should not error out if called with that
1304 : * tid.
1305 : *
1306 : * `scan` needs to have been started via table_beginscan().
1476 andres 1307 ECB : */
1308 : static inline bool
1423 andres 1309 GIC 139 : table_tuple_tid_valid(TableScanDesc scan, ItemPointer tid)
1310 : {
1311 139 : return scan->rs_rd->rd_tableam->tuple_tid_valid(scan, tid);
1312 : }
1313 :
1314 : /*
1315 : * Return the latest version of the tuple at `tid`, by updating `tid` to
1316 : * point at the newest version.
1423 andres 1317 ECB : */
1417 andres 1318 EUB : extern void table_tuple_get_latest_tid(TableScanDesc scan, ItemPointer tid);
1319 :
1495 andres 1320 ECB : /*
1321 : * Return true iff tuple in slot satisfies the snapshot.
1322 : *
1323 : * This assumes the slot's tuple is valid, and of the appropriate type for the
1324 : * AM.
1325 : *
1326 : * Some AMs might modify the data underlying the tuple as a side-effect. If so
1327 : * they ought to mark the relevant buffer dirty.
1328 : */
1329 : static inline bool
1471 andres 1330 GIC 91614 : table_tuple_satisfies_snapshot(Relation rel, TupleTableSlot *slot,
1331 : Snapshot snapshot)
1332 : {
1490 andres 1333 CBC 91614 : return rel->rd_tableam->tuple_satisfies_snapshot(rel, slot, snapshot);
1334 : }
1490 andres 1335 ECB :
1336 : /*
1337 : * Determine which index tuples are safe to delete based on their table TID.
1338 : *
1339 : * Determines which entries from index AM caller's TM_IndexDeleteOp state
1340 : * point to vacuumable table tuples. Entries that are found by tableam to be
1341 : * vacuumable are naturally safe for index AM to delete, and so get directly
1342 : * marked as deletable. See comments above TM_IndexDelete and comments above
1343 : * TM_IndexDeleteOp for full details.
1344 : *
1345 : * Returns a snapshotConflictHorizon transaction ID that caller places in
1346 : * its index deletion WAL record. This might be used during subsequent REDO
1347 : * of the WAL record when in Hot Standby mode -- a recovery conflict for the
1348 : * index deletion operation might be required on the standby.
1349 : */
1350 : static inline TransactionId
816 pg 1351 GIC 8281 : table_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
1352 : {
1353 8281 : return rel->rd_tableam->index_delete_tuples(rel, delstate);
1475 andres 1354 ECB : }
1355 :
1356 :
1478 1357 : /* ----------------------------------------------------------------------------
1358 : * Functions for manipulations of physical tuples.
1359 : * ----------------------------------------------------------------------------
1360 : */
1361 :
1362 : /*
1363 : * Insert a tuple from a slot into table AM routine.
1364 : *
1365 : * The options bitmask allows the caller to specify options that may change the
1366 : * behaviour of the AM. The AM will ignore options that it does not support.
1367 : *
1368 : * If the TABLE_INSERT_SKIP_FSM option is specified, AMs are free to not reuse
1369 : * free space in the relation. This can save some cycles when we know the
1370 : * relation is new and doesn't contain useful amounts of free space.
1371 : * TABLE_INSERT_SKIP_FSM is commonly passed directly to
1372 : * RelationGetBufferForTuple. See that method for more information.
1373 : *
1374 : * TABLE_INSERT_FROZEN should only be specified for inserts into
1375 : * relation storage created during the current subtransaction and when
1376 : * there are no prior snapshots or pre-existing portals open.
1377 : * This causes rows to be frozen, which is an MVCC violation and
1378 : * requires explicit options chosen by the user.
1379 : *
1380 : * TABLE_INSERT_NO_LOGICAL force-disables the emitting of logical decoding
1381 : * information for the tuple. This should solely be used during table rewrites
1382 : * where RelationIsLogicallyLogged(relation) is not yet accurate for the new
1383 : * relation.
1384 : *
1385 : * Note that most of these options will be applied when inserting into the
1386 : * heap's TOAST table, too, if the tuple requires any out-of-line data.
1387 : *
1388 : * The BulkInsertState object (if any; bistate can be NULL for default
1389 : * behavior) is also just passed through to RelationGetBufferForTuple. If
1390 : * `bistate` is provided, table_finish_bulk_insert() needs to be called.
1391 : *
1392 : * On return the slot's tts_tid and tts_tableOid are updated to reflect the
1393 : * insertion. But note that any toasting of fields within the slot is NOT
1394 : * reflected in the slot's contents.
1395 : */
1396 : static inline void
1417 andres 1397 GIC 7135637 : table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid,
1398 : int options, struct BulkInsertStateData *bistate)
1399 : {
1478 1400 7135637 : rel->rd_tableam->tuple_insert(rel, slot, cid, options,
1401 : bistate);
1402 7135625 : }
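/*
 * Editorial example (not part of tableam.h): a minimal sketch of building a
 * virtual tuple in an AM-appropriate slot and inserting it. Names are
 * hypothetical; the caller is assumed to hold a suitable lock on `rel` and
 * to pass arrays matching rel's tuple descriptor.
 */
static inline void
example_tuple_insert(Relation rel, Datum *values, bool *isnull)
{
	TupleTableSlot *slot = table_slot_create(rel, NULL);
	int			natts = slot->tts_tupleDescriptor->natts;

	ExecClearTuple(slot);
	memcpy(slot->tts_values, values, natts * sizeof(Datum));
	memcpy(slot->tts_isnull, isnull, natts * sizeof(bool));
	ExecStoreVirtualTuple(slot);

	/* default options, no bulk-insert state */
	table_tuple_insert(rel, slot, GetCurrentCommandId(true), 0, NULL);

	ExecDropSingleTupleTableSlot(slot);
}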
1403 :
1404 : /*
1405 : * Perform a "speculative insertion". These can be backed out afterwards
1406 : * without aborting the whole transaction. Other sessions can wait for the
1407 : * speculative insertion to be confirmed, turning it into a regular tuple, or
1408 : * aborted, as if it never existed. Speculatively inserted tuples behave as
1409 : * "value locks" of short duration, used to implement INSERT .. ON CONFLICT.
1410 : *
1411 : * A transaction having performed a speculative insertion has to either abort,
1412 : * or finish the speculative insertion with
1413 : * table_tuple_complete_speculative(succeeded = ...).
1414 : */
1415 : static inline void
1417 1416 2013 : table_tuple_insert_speculative(Relation rel, TupleTableSlot *slot,
1417 : CommandId cid, int options,
1418 : struct BulkInsertStateData *bistate,
1419 : uint32 specToken)
1420 : {
1478 andres 1421 CBC 2013 : rel->rd_tableam->tuple_insert_speculative(rel, slot, cid, options,
1422 : bistate, specToken);
1478 andres 1423 GIC 2013 : }
1478 andres 1424 ECB :
1425 : /*
1426 : * Complete "speculative insertion" started in the same transaction. If
1427 : * succeeded is true, the tuple is fully inserted, if false, it's removed.
1428 : */
1429 : static inline void
1417 andres 1430 GIC 2010 : table_tuple_complete_speculative(Relation rel, TupleTableSlot *slot,
1431 : uint32 specToken, bool succeeded)
1432 : {
1478 1433 2010 : rel->rd_tableam->tuple_complete_speculative(rel, slot, specToken,
1434 : succeeded);
1435 2010 : }
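/*
 * Editorial example (not part of tableam.h): the shape of the speculative
 * insertion protocol behind INSERT ... ON CONFLICT. The token would come
 * from SpeculativeInsertionLockAcquire(); conflict detection is elided and
 * `conflicted` stands in for that check.
 */
static inline void
example_speculative_insert(Relation rel, TupleTableSlot *slot,
						   uint32 specToken, bool conflicted)
{
	table_tuple_insert_speculative(rel, slot, GetCurrentCommandId(true),
								   0, NULL, specToken);

	/* ... look for conflicting tuples here ... */

	/* confirm the tuple on success; back it out (without abort) otherwise */
	table_tuple_complete_speculative(rel, slot, specToken, !conflicted);
}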
1436 :
1437 : /*
1438 : * Insert multiple tuples into a table.
1439 : *
999 michael 1440 ECB : * This is like table_tuple_insert(), but inserts multiple tuples in one
1441 : * operation. That's often faster than calling table_tuple_insert() in a loop,
1442 : * because e.g. the AM can reduce WAL logging and page locking overhead.
1443 : *
1444 : * Except for taking an array of `nslots` TupleTableSlots as input in
1466 andres 1445 : * `slots`, the parameters for table_multi_insert() are the same as for
1446 : * table_tuple_insert().
1447 : *
1448 : * Note: this leaks memory into the current memory context. You can create a
1449 : * temporary context before calling this, if that's a problem.
1450 : */
1451 : static inline void
1466 andres 1452 GIC 1492 : table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
1453 : CommandId cid, int options, struct BulkInsertStateData *bistate)
1466 andres 1454 ECB : {
1466 andres 1455 GIC 1492 : rel->rd_tableam->multi_insert(rel, slots, nslots,
1456 : cid, options, bistate);
1466 andres 1457 CBC 1492 : }
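/*
 * Editorial example (not part of tableam.h): running table_multi_insert()
 * inside a short-lived memory context, per the leak note above. Slot setup
 * is elided; `slots` is assumed to hold `nslots` ready tuples. Requires
 * utils/memutils.h.
 */
static inline void
example_multi_insert(Relation rel, TupleTableSlot **slots, int nslots)
{
	MemoryContext batchcxt = AllocSetContextCreate(CurrentMemoryContext,
												   "multi-insert example",
												   ALLOCSET_DEFAULT_SIZES);
	MemoryContext oldcxt = MemoryContextSwitchTo(batchcxt);

	table_multi_insert(rel, slots, nslots,
					   GetCurrentCommandId(true), 0, NULL);

	MemoryContextSwitchTo(oldcxt);
	MemoryContextDelete(batchcxt);	/* reclaims anything the AM leaked */
}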
1458 :
1478 andres 1459 ECB : /*
1460 : * Delete a tuple.
1461 : *
1462 : * NB: do not call this directly unless prepared to deal with
1463 : * concurrent-update conditions. Use simple_table_tuple_delete instead.
1464 : *
1465 : * Input parameters:
1466 : * relation - table to be modified (caller must hold suitable lock)
1467 : * tid - TID of tuple to be deleted
1468 : * cid - delete command ID (used for visibility test, and stored into
1469 : * cmax if successful)
1470 : * crosscheck - if not InvalidSnapshot, also check tuple against this
1471 : * wait - true if should wait for any conflicting update to commit/abort
1472 : * Output parameters:
1473 : * tmfd - filled in failure cases (see below)
1474 : * changingPart - true iff the tuple is being moved to another partition
1475 : * table due to an update of the partition key.
1476 : *
1477 : * Normal, successful return value is TM_Ok, which means we did actually
1478 : * delete it. Failure return codes are TM_SelfModified, TM_Updated, and
1462 1479 : * TM_BeingModified (the last only possible if wait == false).
1480 : *
1478 1481 : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
1482 : * t_xmax, and, if possible, t_cmax. See comments for
1483 : * struct TM_FailureData for additional info.
1484 : */
1485 : static inline TM_Result
1417 andres 1486 GIC 883906 : table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
1487 : Snapshot snapshot, Snapshot crosscheck, bool wait,
1488 : TM_FailureData *tmfd, bool changingPart)
1489 : {
1478 1490 883906 : return rel->rd_tableam->tuple_delete(rel, tid, cid,
1491 : snapshot, crosscheck,
1492 : wait, tmfd, changingPart);
1493 : }
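/*
 * Editorial example (not part of tableam.h): a delete that handles the
 * result codes documented above, loosely after ExecDelete(); EvalPlanQual
 * handling is omitted and the function name is hypothetical.
 */
static inline void
example_tuple_delete(Relation rel, ItemPointer tid, Snapshot snapshot)
{
	TM_FailureData tmfd;
	TM_Result	result;

	result = table_tuple_delete(rel, tid, GetCurrentCommandId(true),
								snapshot, InvalidSnapshot,
								true /* wait */ ,
								&tmfd, false /* changingPart */ );

	switch (result)
	{
		case TM_Ok:
			break;
		case TM_SelfModified:
			/* already deleted by this command; typically ignored */
			break;
		case TM_Updated:
		case TM_Deleted:
			/* concurrently modified: retry from tmfd.ctid, or report */
			elog(ERROR, "tuple concurrently modified");
			break;
		default:
			elog(ERROR, "unexpected table_tuple_delete status: %d", result);
			break;
	}
}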
1494 :
1495 : /*
1496 : * Update a tuple.
1497 : *
1498 : * NB: do not call this directly unless you are prepared to deal with
1499 : * concurrent-update conditions. Use simple_table_tuple_update instead.
1500 : *
1501 : * Input parameters:
1502 : * relation - table to be modified (caller must hold suitable lock)
1503 : * otid - TID of old tuple to be replaced
1504 : * slot - newly constructed tuple data to store
1505 : * cid - update command ID (used for visibility test, and stored into
1506 : * cmax/cmin if successful)
1507 : * crosscheck - if not InvalidSnapshot, also check old tuple against this
1508 : * wait - true if should wait for any conflicting update to commit/abort
1509 : * Output parameters:
1478 andres 1510 ECB : * tmfd - filled in failure cases (see below)
1511 : * lockmode - filled with lock mode acquired on tuple
1512 : * update_indexes - in success cases this is set to a TU_UpdateIndexes value
1513 : * indicating which index entries (if any) need updating for this tuple
6 akorotkov 1514 : *
1515 : * Normal, successful return value is TM_Ok, which means we did actually
1516 : * update it. Failure return codes are TM_SelfModified, TM_Updated, and
1517 : * TM_BeingModified (the last only possible if wait == false).
1518 : *
1519 : * On success, the slot's tts_tid and tts_tableOid are updated to match the new
1520 : * stored tuple; in particular, slot->tts_tid is set to the TID where the
1521 : * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
1522 : * update was done. However, any TOAST changes in the new tuple's
1523 : * data are not reflected back into the slot's contents.
1524 : *
1525 : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
1526 : * t_xmax, and, if possible, t_cmax. See comments for struct TM_FailureData
1527 : * for additional info.
1528 : */
1529 : static inline TM_Result
1417 andres 1530 GIC 217778 : table_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *slot,
1531 : CommandId cid, Snapshot snapshot, Snapshot crosscheck,
1532 : bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode,
1533 : TU_UpdateIndexes *update_indexes)
1534 : {
1478 1535 217778 : return rel->rd_tableam->tuple_update(rel, otid, slot,
1536 : cid, snapshot, crosscheck,
1537 : wait, tmfd,
1538 : lockmode, update_indexes);
1539 : }
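/*
 * Editorial example (not part of tableam.h): an update that inspects the
 * TU_UpdateIndexes result to decide which index entries need re-insertion;
 * the index maintenance itself (cf. ExecInsertIndexTuples()) is elided.
 */
static inline void
example_tuple_update(Relation rel, ItemPointer otid, TupleTableSlot *newslot,
					 Snapshot snapshot)
{
	TM_FailureData tmfd;
	LockTupleMode lockmode;
	TU_UpdateIndexes update_indexes;
	TM_Result	result;

	result = table_tuple_update(rel, otid, newslot,
								GetCurrentCommandId(true),
								snapshot, InvalidSnapshot,
								true /* wait */ , &tmfd,
								&lockmode, &update_indexes);

	if (result == TM_Ok && update_indexes != TU_None)
	{
		/*
		 * TU_All: new entries are needed in all indexes;
		 * TU_Summarizing: only summarizing (e.g. BRIN) indexes need them.
		 */
	}
}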
1540 :
1541 : /*
1542 : * Lock a tuple in the specified mode.
1543 : *
1544 : * Input parameters:
1545 : * relation: relation containing tuple (caller must hold suitable lock)
1546 : * tid: TID of tuple to lock
1547 : * snapshot: snapshot to use for visibility determinations
1548 : * cid: current command ID (used for visibility test, and stored into
1549 : * tuple's cmax if lock is successful)
1550 : * mode: lock mode desired
1551 : * wait_policy: what to do if tuple lock is not available
1552 : * flags:
1553 : * If TUPLE_LOCK_FLAG_LOCK_UPDATE_IN_PROGRESS, follow the update chain to
1478 andres 1554 ECB : * also lock descendant tuples if lock modes don't conflict.
1555 : * If TUPLE_LOCK_FLAG_FIND_LAST_VERSION, follow the update chain and lock
1556 : * latest version.
1557 : *
1558 : * Output parameters:
1559 : * *slot: contains the target tuple
1560 : * *tmfd: filled in failure cases (see below)
1561 : *
1562 : * Function result may be:
1563 : * TM_Ok: lock was successfully acquired
1564 : * TM_Invisible: lock failed because tuple was never visible to us
1565 : * TM_SelfModified: lock failed because tuple updated by self
1566 : * TM_Updated: lock failed because tuple updated by other xact
1567 : * TM_Deleted: lock failed because tuple deleted by other xact
1568 : * TM_WouldBlock: lock couldn't be acquired and wait_policy is skip
1569 : *
1570 : * In the failure cases other than TM_Invisible and TM_Deleted, the routine
1571 : * fills *tmfd with the tuple's t_ctid, t_xmax, and, if possible, t_cmax. See
1572 : * comments for struct TM_FailureData for additional info.
1573 : */
1574 : static inline TM_Result
1417 andres 1575 GIC 82371 : table_tuple_lock(Relation rel, ItemPointer tid, Snapshot snapshot,
1576 : TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
1577 : LockWaitPolicy wait_policy, uint8 flags,
1578 : TM_FailureData *tmfd)
1579 : {
1478 1580 82371 : return rel->rd_tableam->tuple_lock(rel, tid, snapshot, slot,
1581 : cid, mode, wait_policy,
1582 : flags, tmfd);
1583 : }
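/*
 * Editorial example (not part of tableam.h): locking the latest version of
 * a row while skipping rows whose lock is unavailable, in the spirit of
 * SELECT ... FOR UPDATE SKIP LOCKED. Returns true iff the lock was taken.
 */
static inline bool
example_lock_latest(Relation rel, ItemPointer tid, Snapshot snapshot,
					TupleTableSlot *slot)
{
	TM_FailureData tmfd;
	TM_Result	result;

	result = table_tuple_lock(rel, tid, snapshot, slot,
							  GetCurrentCommandId(false),
							  LockTupleExclusive, LockWaitSkip,
							  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
							  &tmfd);

	/* TM_WouldBlock is expected under LockWaitSkip; treat as "skip row" */
	return result == TM_Ok;
}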
1584 :
1585 : /*
1586 : * Perform operations necessary to complete insertions made via
1587 : * tuple_insert and multi_insert with a BulkInsertState specified.
1588 : */
1589 : static inline void
1469 1590 2024 : table_finish_bulk_insert(Relation rel, int options)
1591 : {
1592 : /* optional callback */
1593 2024 : if (rel->rd_tableam && rel->rd_tableam->finish_bulk_insert)
1469 andres 1594 UIC 0 : rel->rd_tableam->finish_bulk_insert(rel, options);
1469 andres 1595 GIC 2024 : }
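/*
 * Editorial example (not part of tableam.h): the bulk-insert lifecycle when
 * a BulkInsertState is used. GetBulkInsertState()/FreeBulkInsertState()
 * come from access/heapam.h; slot preparation is elided.
 */
static inline void
example_bulk_insert(Relation rel, TupleTableSlot **slots, int nslots)
{
	BulkInsertState bistate = GetBulkInsertState();

	table_multi_insert(rel, slots, nslots, GetCurrentCommandId(true),
					   TABLE_INSERT_SKIP_FSM, bistate);

	/* required whenever a bistate was passed to the insert routines */
	table_finish_bulk_insert(rel, TABLE_INSERT_SKIP_FSM);
	FreeBulkInsertState(bistate);
}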
1596 :
1597 :
1598 : /* ------------------------------------------------------------------------
1474 andres 1599 ECB : * DDL related functionality.
1600 : * ------------------------------------------------------------------------
1601 : */
1602 :
1603 : /*
1604 : * Create storage for `rel` in `newrlocator`, with persistence set to
1605 : * `persistence`.
1606 : *
1607 : * This is used both during relation creation and various DDL operations to
1608 : * create new rel storage that can be filled from scratch. When creating
1609 : * new storage for an existing relfilelocator, this should be called before the
1610 : * relcache entry has been updated.
1611 : *
1612 : * *freezeXid, *minmulti are set to the xid / multixact horizon for the table
1613 : * that pg_class.{relfrozenxid, relminmxid} have to be set to.
1473 1614 : */
1615 : static inline void
277 rhaas 1616 GNC 55537 : table_relation_set_new_filelocator(Relation rel,
1617 : const RelFileLocator *newrlocator,
1618 : char persistence,
1619 : TransactionId *freezeXid,
1620 : MultiXactId *minmulti)
1621 : {
1622 55537 : rel->rd_tableam->relation_set_new_filelocator(rel, newrlocator,
1623 : persistence, freezeXid,
1624 : minmulti);
1473 andres 1625 GIC 55537 : }
1626 :
1627 : /*
1628 : * Remove all table contents from `rel`, in a non-transactional manner.
1629 : * Non-transactional means that there's no need to support rollbacks. This
1630 : * is commonly used only to perform truncations for relation storage created in
1631 : * the current transaction.
1632 : */
1633 : static inline void
1634 262 : table_relation_nontransactional_truncate(Relation rel)
1635 : {
1636 262 : rel->rd_tableam->relation_nontransactional_truncate(rel);
1637 262 : }
1638 :
1639 : /*
1640 : * Copy data from `rel` into the new relfilelocator `newrlocator`. The new
1641 : * relfilelocator may not have storage associated before this function is
1642 : * called. This is only supposed to be used for low level operations like
1643 : * changing a relation's tablespace.
1644 : */
1645 : static inline void
277 rhaas 1646 GNC 49 : table_relation_copy_data(Relation rel, const RelFileLocator *newrlocator)
1473 andres 1647 ECB : {
277 rhaas 1648 GNC 49 : rel->rd_tableam->relation_copy_data(rel, newrlocator);
1473 andres 1649 GIC 49 : }
1473 andres 1650 ECB :
1651 : /*
1652 : * Copy data from `OldTable` into `NewTable`, as part of a CLUSTER or VACUUM
1653 : * FULL.
1654 : *
1655 : * Additional Input parameters:
1656 : * - use_sort - if true, the table contents are sorted appropriate for
1657 : * `OldIndex`; if false and OldIndex is not InvalidOid, the data is copied
1658 : * in that index's order; if false and OldIndex is InvalidOid, no sorting is
1447 1659 : * performed
1660 : * - OldIndex - see use_sort
1661 : * - OldestXmin - computed by vacuum_get_cutoffs(), even when
1662 : * not needed for the relation's AM
1663 : * - *xid_cutoff - ditto
1664 : * - *multi_cutoff - ditto
1665 : *
1666 : * Output parameters:
1667 : * - *xid_cutoff - rel's new relfrozenxid value, may be invalid
1668 : * - *multi_cutoff - rel's new relminmxid value, may be invalid
1669 : * - *tups_vacuumed - stats, for logging, if appropriate for AM
1670 : * - *tups_recently_dead - stats, for logging, if appropriate for AM
1473 1671 : */
1672 : static inline void
1405 michael 1673 CBC 262 : table_relation_copy_for_cluster(Relation OldTable, Relation NewTable,
1473 andres 1674 ECB : Relation OldIndex,
1675 : bool use_sort,
1676 : TransactionId OldestXmin,
1677 : TransactionId *xid_cutoff,
1678 : MultiXactId *multi_cutoff,
1679 : double *num_tuples,
1680 : double *tups_vacuumed,
1681 : double *tups_recently_dead)
1682 : {
1405 michael 1683 GIC 262 : OldTable->rd_tableam->relation_copy_for_cluster(OldTable, NewTable, OldIndex,
1684 : use_sort, OldestXmin,
1685 : xid_cutoff, multi_cutoff,
1686 : num_tuples, tups_vacuumed,
1687 : tups_recently_dead);
1473 andres 1688 262 : }
1689 :
1690 : /*
1691 : * Perform VACUUM on the relation. The VACUUM can be triggered by a user or by
1692 : * autovacuum. The specific actions performed by the AM will depend heavily on
1693 : * the individual AM.
1694 : *
1695 : * On entry a transaction needs to already have been established, and the
1696 : * table is locked with a ShareUpdateExclusive lock.
1697 : *
1471 andres 1698 ECB : * Note that neither VACUUM FULL (nor CLUSTER) nor ANALYZE goes through this
1699 : * routine, even if (for ANALYZE) it is part of the same VACUUM command.
1700 : */
1701 : static inline void
1471 andres 1702 GIC 36848 : table_relation_vacuum(Relation rel, struct VacuumParams *params,
1703 : BufferAccessStrategy bstrategy)
1704 : {
1705 36848 : rel->rd_tableam->relation_vacuum(rel, params, bstrategy);
1706 36848 : }
1707 :
1471 andres 1708 ECB : /*
1709 : * Prepare to analyze block `blockno` of `scan`. The scan needs to have been
1710 : * started with table_beginscan_analyze(). Note that this routine might
1711 : * acquire resources like locks that are held until
1712 : * table_scan_analyze_next_tuple() returns false.
1713 : *
1714 : * Returns false if block is unsuitable for sampling, true otherwise.
1715 : */
1716 : static inline bool
1471 andres 1717 GIC 147358 : table_scan_analyze_next_block(TableScanDesc scan, BlockNumber blockno,
1718 : BufferAccessStrategy bstrategy)
1719 : {
1720 147358 : return scan->rs_rd->rd_tableam->scan_analyze_next_block(scan, blockno,
1721 : bstrategy);
1722 : }
1723 :
1724 : /*
1725 : * Iterate over tuples in the block selected with
1726 : * table_scan_analyze_next_block() (which needs to have returned true, and
1471 andres 1727 ECB : * this routine may not have returned false for the same block before). If a
1728 : * tuple that's suitable for sampling is found, true is returned and a tuple
1729 : * is stored in `slot`.
1730 : *
1731 : * *liverows and *deadrows are incremented according to the encountered
1732 : * tuples.
1733 : */
1734 : static inline bool
1471 andres 1735 GIC 10303246 : table_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
1736 : double *liverows, double *deadrows,
1737 : TupleTableSlot *slot)
1738 : {
1739 10303246 : return scan->rs_rd->rd_tableam->scan_analyze_next_tuple(scan, OldestXmin,
1740 : liverows, deadrows,
1741 : slot);
1471 andres 1742 ECB : }
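/*
 * Editorial example (not part of tableam.h): the block-then-tuple sampling
 * loop ANALYZE drives, compressed from acquire_sample_rows() in
 * commands/analyze.c; reservoir bookkeeping is elided.
 */
static inline void
example_analyze_block(TableScanDesc scan, BlockNumber blockno,
					  BufferAccessStrategy bstrategy,
					  TransactionId OldestXmin, TupleTableSlot *slot,
					  double *liverows, double *deadrows)
{
	/* may pin/lock the block until the tuple loop below is exhausted */
	if (!table_scan_analyze_next_block(scan, blockno, bstrategy))
		return;					/* block unsuitable for sampling */

	while (table_scan_analyze_next_tuple(scan, OldestXmin,
										 liverows, deadrows, slot))
	{
		/* feed the sampled tuple in `slot` into the reservoir here */
	}
}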
1743 :
1744 : /*
1462 1745 : * table_index_build_scan - scan the table to find tuples to be indexed
1746 : *
1747 : * This is called back from an access-method-specific index build procedure
1748 : * after the AM has done whatever setup it needs. The parent table relation
1749 : * is scanned to find tuples that should be entered into the index. Each
1750 : * such tuple is passed to the AM's callback routine, which does the right
1751 : * things to add it to the new index. After we return, the AM's index
1752 : * build procedure does whatever cleanup it needs.
1753 : *
1754 : * The total count of live tuples is returned. This is for updating pg_class
1755 : * statistics. (It's annoying not to be able to do that here, but we want to
1756 : * merge that update with others; see index_update_stats.) Note that the
1757 : * index AM itself must keep track of the number of index tuples; we don't do
1758 : * so here because the AM might reject some of the tuples for its own reasons,
1759 : * such as being unable to store NULLs.
1474 1760 : *
1761 : * If 'progress', the PROGRESS_SCAN_BLOCKS_TOTAL counter is updated when
1762 : * starting the scan, and PROGRESS_SCAN_BLOCKS_DONE is updated as we go along.
1763 : *
1764 : * A side effect is to set indexInfo->ii_BrokenHotChain to true if we detect
1765 : * any potentially broken HOT chains. Currently, we set this if there are any
1766 : * RECENTLY_DEAD or DELETE_IN_PROGRESS entries in a HOT chain, without trying
1767 : * very hard to detect whether they're really incompatible with the chain tip.
1768 : * This only really makes sense for heap AM, it might need to be generalized
1769 : * for other AMs later.
1770 : */
1771 : static inline double
1405 michael 1772 GIC 65098 : table_index_build_scan(Relation table_rel,
1773 : Relation index_rel,
1774 : struct IndexInfo *index_info,
1775 : bool allow_sync,
1776 : bool progress,
1777 : IndexBuildCallback callback,
1778 : void *callback_state,
1779 : TableScanDesc scan)
1780 : {
1781 65098 : return table_rel->rd_tableam->index_build_range_scan(table_rel,
1782 : index_rel,
1783 : index_info,
1784 : allow_sync,
1785 : false,
1786 : progress,
1787 : 0,
1788 : InvalidBlockNumber,
1789 : callback,
1790 : callback_state,
1791 : scan);
1792 : }
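/*
 * Editorial example (not part of tableam.h): how an index AM's build
 * routine typically drives this scan, modeled on btbuild(); the callback
 * and build state are hypothetical.
 */
static void
example_build_callback(Relation index, ItemPointer tid, Datum *values,
					   bool *isnull, bool tupleIsAlive, void *state)
{
	/* form an index tuple from values/isnull and insert it into `index` */
}

static inline double
example_build_heapscan(Relation heap, Relation index,
					   struct IndexInfo *indexInfo, void *buildstate)
{
	return table_index_build_scan(heap, index, indexInfo,
								  true /* allow_sync */ ,
								  true /* progress */ ,
								  example_build_callback,
								  buildstate, NULL /* let AM start scan */ );
}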
1793 :
1794 : /*
1795 : * As table_index_build_scan(), except that instead of scanning the complete
1796 : * table, only the given number of blocks are scanned. Scan to end-of-rel can
1036 peter 1797 ECB : * be signaled by passing InvalidBlockNumber as numblocks. Note that
1798 : * restricting the range to scan cannot be done when requesting syncscan.
1799 : *
1800 : * When "anyvisible" mode is requested, all tuples visible to any transaction
1801 : * are indexed and counted as live, including those inserted or deleted by
1802 : * transactions that are still in progress.
1803 : */
1804 : static inline double
1405 michael 1805 GIC 1467 : table_index_build_range_scan(Relation table_rel,
1474 andres 1806 ECB : Relation index_rel,
1807 : struct IndexInfo *index_info,
1808 : bool allow_sync,
1809 : bool anyvisible,
1810 : bool progress,
1811 : BlockNumber start_blockno,
1812 : BlockNumber numblocks,
1813 : IndexBuildCallback callback,
1814 : void *callback_state,
1815 : TableScanDesc scan)
1816 : {
1405 michael 1817 GIC 1467 : return table_rel->rd_tableam->index_build_range_scan(table_rel,
1818 : index_rel,
1819 : index_info,
1820 : allow_sync,
1821 : anyvisible,
1822 : progress,
1823 : start_blockno,
1824 : numblocks,
1825 : callback,
1826 : callback_state,
1827 : scan);
1828 : }
1829 :
1474 andres 1830 ECB : /*
1831 : * table_index_validate_scan - second table scan for concurrent index build
1832 : *
1833 : * See validate_index() for an explanation.
1834 : */
1835 : static inline void
1405 michael 1836 GIC 272 : table_index_validate_scan(Relation table_rel,
1837 : Relation index_rel,
1838 : struct IndexInfo *index_info,
1839 : Snapshot snapshot,
1840 : struct ValidateIndexState *state)
1841 : {
1405 michael 1842 CBC 272 : table_rel->rd_tableam->index_validate_scan(table_rel,
1843 : index_rel,
1844 : index_info,
1845 : snapshot,
1846 : state);
1474 andres 1847 GIC 272 : }
1848 :
1849 :
1850 : /* ----------------------------------------------------------------------------
1851 : * Miscellaneous functionality
1852 : * ----------------------------------------------------------------------------
1853 : */
1854 :
1855 : /*
1856 : * Return the current size of `rel` in bytes. If `forkNumber` is
1857 : * InvalidForkNumber, return the relation's overall size, otherwise the size
1858 : * for the indicated fork.
1859 : *
1860 : * Note that the overall size might not be the equivalent of the sum of sizes
1423 andres 1861 ECB : * for the individual forks for some AMs, e.g. because the AM's storage does
1862 : * not neatly map onto the builtin types of forks.
1863 : */
1864 : static inline uint64
1423 andres 1865 GIC 1614501 : table_relation_size(Relation rel, ForkNumber forkNumber)
1866 : {
1423 andres 1867 CBC 1614501 : return rel->rd_tableam->relation_size(rel, forkNumber);
1868 : }
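/*
 * Editorial example (not part of tableam.h): deriving a block count from
 * the size callback for a block-oriented AM; a hypothetical helper.
 */
static inline BlockNumber
example_relation_nblocks(Relation rel)
{
	return (BlockNumber) (table_relation_size(rel, MAIN_FORKNUM) / BLCKSZ);
}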
1869 :
1870 : /*
1871 : * table_relation_needs_toast_table - does this relation need a toast table?
1419 rhaas 1872 ECB : */
1873 : static inline bool
1419 rhaas 1874 GIC 30583 : table_relation_needs_toast_table(Relation rel)
1875 : {
1876 30583 : return rel->rd_tableam->relation_needs_toast_table(rel);
1877 : }
1878 :
1879 : /*
1880 : * Return the OID of the AM that should be used to implement the TOAST table
1881 : * for this relation.
1882 : */
1883 : static inline Oid
1188 1884 18026 : table_relation_toast_am(Relation rel)
1885 : {
1886 18026 : return rel->rd_tableam->relation_toast_am(rel);
1887 : }
1888 :
1889 : /*
1188 rhaas 1890 ECB : * Fetch all or part of a TOAST value from a TOAST table.
1891 : *
1892 : * If this AM is never used to implement a TOAST table, then this callback
1893 : * is not needed. But, if toasted values are ever stored in a table of this
1894 : * type, then you will need this callback.
1895 : *
1896 : * toastrel is the relation in which the toasted value is stored.
1897 : *
1898 : * valueid identifies which toast value is to be fetched. For the heap,
1899 : * this corresponds to the values stored in the chunk_id column.
1900 : *
1901 : * attrsize is the total size of the toast value to be fetched.
1902 : *
1903 : * sliceoffset is the offset within the toast value of the first byte that
1904 : * should be fetched.
1905 : *
1906 : * slicelength is the number of bytes from the toast value that should be
1907 : * fetched.
1908 : *
1909 : * result is caller-allocated space into which the fetched bytes should be
1910 : * stored.
1911 : */
1912 : static inline void
1188 rhaas 1913 GIC 39145 : table_relation_fetch_toast_slice(Relation toastrel, Oid valueid,
1914 : int32 attrsize, int32 sliceoffset,
1915 : int32 slicelength, struct varlena *result)
1916 : {
1186 1917 39145 : toastrel->rd_tableam->relation_fetch_toast_slice(toastrel, valueid,
1918 : attrsize,
1919 : sliceoffset, slicelength,
1920 : result);
1188 1921 39145 : }
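/*
 * Editorial example (not part of tableam.h): fetching the first (at most)
 * 100 bytes of a toast value into caller-allocated space, following the
 * allocation pattern of toast_fetch_datum_slice() in access/common/detoast.c.
 */
static inline struct varlena *
example_fetch_toast_prefix(Relation toastrel, Oid valueid, int32 attrsize)
{
	int32		slicelength = Min(attrsize, 100);
	struct varlena *result = (struct varlena *)
		palloc(slicelength + VARHDRSZ);

	SET_VARSIZE(result, slicelength + VARHDRSZ);
	table_relation_fetch_toast_slice(toastrel, valueid, attrsize,
									 0 /* sliceoffset */ , slicelength,
									 result);
	return result;
}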
1922 :
1923 :
1924 : /* ----------------------------------------------------------------------------
1925 : * Planner related functionality
1926 : * ----------------------------------------------------------------------------
1927 : */
1928 :
1929 : /*
1930 : * Estimate the current size of the relation, as an AM-specific workhorse for
1931 : * estimate_rel_size(). Look there for an explanation of the parameters.
1932 : */
1933 : static inline void
1471 andres 1934 174586 : table_relation_estimate_size(Relation rel, int32 *attr_widths,
1935 : BlockNumber *pages, double *tuples,
1936 : double *allvisfrac)
1937 : {
1471 andres 1938 CBC 174586 : rel->rd_tableam->relation_estimate_size(rel, attr_widths, pages, tuples,
1939 : allvisfrac);
1471 andres 1940 GIC 174586 : }
1941 :
1471 andres 1942 ECB :
1943 : /* ----------------------------------------------------------------------------
1944 : * Executor related functionality
1945 : * ----------------------------------------------------------------------------
1946 : */
1947 :
1948 : /*
1949 : * Prepare to fetch / check / return tuples from `tbmres->blockno` as part of
1950 : * a bitmap table scan. `scan` needs to have been started via
1951 : * table_beginscan_bm(). Returns false if there are no tuples to be found on
1952 : * the page, true otherwise.
1953 : *
1954 : * Note that this is an optionally implemented callback, so it should only be
1955 : * used after verifying its presence (at plan time or similar).
1956 : */
1957 : static inline bool
1470 andres 1958 GIC 254448 : table_scan_bitmap_next_block(TableScanDesc scan,
1470 andres 1959 ECB : struct TBMIterateResult *tbmres)
1960 : {
1961 : /*
1962 : * We don't expect direct calls to table_scan_bitmap_next_block with valid
974 akapila 1963 : * CheckXidAlive for catalog or regular tables. See detailed comments in
1964 : * xact.c where these variables are declared.
1965 : */
974 akapila 1966 GIC 254448 : if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
974 akapila 1967 UIC 0 : elog(ERROR, "unexpected table_scan_bitmap_next_block call during logical decoding");
1968 :
1470 andres 1969 GIC 254448 : return scan->rs_rd->rd_tableam->scan_bitmap_next_block(scan,
1970 : tbmres);
1971 : }
1972 :
1973 : /*
1974 : * Fetch the next tuple of a bitmap table scan into `slot` and return true if
1975 : * a visible tuple was found, false otherwise.
1976 : * table_scan_bitmap_next_block() needs to previously have selected a
1977 : * block (i.e. returned true), and no previous
1978 : * table_scan_bitmap_next_tuple() for the same block may have
1979 : * returned false.
1980 : */
1981 : static inline bool
1982 4018959 : table_scan_bitmap_next_tuple(TableScanDesc scan,
1470 andres 1983 ECB : struct TBMIterateResult *tbmres,
1984 : TupleTableSlot *slot)
1985 : {
1986 : /*
1987 : * We don't expect direct calls to table_scan_bitmap_next_tuple with valid
1988 : * CheckXidAlive for catalog or regular tables. See detailed comments in
1989 : * xact.c where these variables are declared.
1990 : */
974 akapila 1991 CBC 4018959 : if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
974 akapila 1992 UBC 0 : elog(ERROR, "unexpected table_scan_bitmap_next_tuple call during logical decoding");
1993 :
1470 andres 1994 CBC 4018959 : return scan->rs_rd->rd_tableam->scan_bitmap_next_tuple(scan,
1995 : tbmres,
1996 : slot);
1997 : }
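/*
 * Editorial example (not part of tableam.h): the per-page driving loop of a
 * bitmap table scan, compressed from BitmapHeapNext(); iterator setup,
 * prefetching, and rescans are elided.
 */
static inline void
example_bitmap_page(TableScanDesc scan, struct TBMIterateResult *tbmres,
					TupleTableSlot *slot)
{
	if (!table_scan_bitmap_next_block(scan, tbmres))
		return;					/* nothing to fetch on this page */

	while (table_scan_bitmap_next_tuple(scan, tbmres, slot))
	{
		/* process the visible tuple in `slot` */
	}
}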
1998 :
1999 : /*
2000 : * Prepare to fetch tuples from the next block in a sample scan. Returns false
2001 : * if the sample scan is finished, true otherwise. `scan` needs to have been
2002 : * started via table_beginscan_sampling().
2003 : *
2004 : * This will call the TsmRoutine's NextSampleBlock() callback if necessary
2005 : * (i.e. NextSampleBlock is not NULL), or perform a sequential scan over the
2006 : * underlying relation.
1471 andres 2007 ECB : */
2008 : static inline bool
1471 andres 2009 GIC 6455 : table_scan_sample_next_block(TableScanDesc scan,
2010 : struct SampleScanState *scanstate)
2011 : {
2012 : /*
2013 : * We don't expect direct calls to table_scan_sample_next_block with valid
2014 : * CheckXidAlive for catalog or regular tables. See detailed comments in
2015 : * xact.c where these variables are declared.
974 akapila 2016 ECB : */
974 akapila 2017 GBC 6455 : if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
974 akapila 2018 UIC 0 : elog(ERROR, "unexpected table_scan_sample_next_block call during logical decoding");
1471 andres 2019 CBC 6455 : return scan->rs_rd->rd_tableam->scan_sample_next_block(scan, scanstate);
2020 : }
2021 :
2022 : /*
2023 : * Fetch the next sample tuple into `slot` and return true if a visible tuple
2024 : * was found, false otherwise. table_scan_sample_next_block() needs to
2025 : * previously have selected a block (i.e. returned true), and no previous
2026 : * table_scan_sample_next_tuple() for the same block may have returned false.
2027 : *
2028 : * This will call the TsmRoutine's NextSampleTuple() callback.
2029 : */
2030 : static inline bool
1471 andres 2031 GIC 126947 : table_scan_sample_next_tuple(TableScanDesc scan,
2032 : struct SampleScanState *scanstate,
2033 : TupleTableSlot *slot)
1471 andres 2034 ECB : {
2035 : /*
2036 : * We don't expect direct calls to table_scan_sample_next_tuple with valid
2037 : * CheckXidAlive for catalog or regular tables. See detailed comments in
2038 : * xact.c where these variables are declared.
2039 : */
974 akapila 2040 GIC 126947 : if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
974 akapila 2041 UIC 0 : elog(ERROR, "unexpected table_scan_sample_next_tuple call during logical decoding");
1471 andres 2042 CBC 126947 : return scan->rs_rd->rd_tableam->scan_sample_next_tuple(scan, scanstate,
1471 andres 2043 EUB : slot);
1471 andres 2044 ECB : }
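/*
 * Editorial example (not part of tableam.h): the nested block/tuple loop of
 * a sample scan, compressed from tablesample_getnext() in
 * executor/nodeSamplescan.c.
 */
static inline void
example_sample_scan(TableScanDesc scan, struct SampleScanState *scanstate,
					TupleTableSlot *slot)
{
	while (table_scan_sample_next_block(scan, scanstate))
	{
		while (table_scan_sample_next_tuple(scan, scanstate, slot))
		{
			/* process the sampled tuple in `slot` */
		}
	}
}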
2045 :
2046 :
2047 : /* ----------------------------------------------------------------------------
2048 : * Functions to make modifications a bit simpler.
2049 : * ----------------------------------------------------------------------------
2050 : */
2051 :
2052 : extern void simple_table_tuple_insert(Relation rel, TupleTableSlot *slot);
2053 : extern void simple_table_tuple_delete(Relation rel, ItemPointer tid,
2054 : Snapshot snapshot);
2055 : extern void simple_table_tuple_update(Relation rel, ItemPointer otid,
1417 2056 : TupleTableSlot *slot, Snapshot snapshot,
2057 : TU_UpdateIndexes *update_indexes);
2058 :
2059 :
2060 : /* ----------------------------------------------------------------------------
2061 : * Helper functions to implement parallel scans for block oriented AMs.
2062 : * ----------------------------------------------------------------------------
2063 : */
2064 :
1490 2065 : extern Size table_block_parallelscan_estimate(Relation rel);
1490 andres 2066 EUB : extern Size table_block_parallelscan_initialize(Relation rel,
1418 tgl 2067 ECB : ParallelTableScanDesc pscan);
2068 : extern void table_block_parallelscan_reinitialize(Relation rel,
2069 : ParallelTableScanDesc pscan);
2070 : extern BlockNumber table_block_parallelscan_nextpage(Relation rel,
2071 : ParallelBlockTableScanWorker pbscanwork,
2072 : ParallelBlockTableScanDesc pbscan);
2073 : extern void table_block_parallelscan_startblock_init(Relation rel,
2074 : ParallelBlockTableScanWorker pbscanwork,
2075 : ParallelBlockTableScanDesc pbscan);
2076 :
2077 :
2078 : /* ----------------------------------------------------------------------------
2079 : * Helper functions to implement relation sizing for block oriented AMs.
2080 : * ----------------------------------------------------------------------------
2081 : */
2082 :
2083 : extern uint64 table_block_relation_size(Relation rel, ForkNumber forkNumber);
2084 : extern void table_block_relation_estimate_size(Relation rel,
2085 : int32 *attr_widths,
2086 : BlockNumber *pages,
2087 : double *tuples,
2088 : double *allvisfrac,
2089 : Size overhead_bytes_per_tuple,
2090 : Size usable_bytes_per_page);
2091 :
2092 : /* ----------------------------------------------------------------------------
2093 : * Functions in tableamapi.c
2094 : * ----------------------------------------------------------------------------
2095 : */
2096 :
2097 : extern const TableAmRoutine *GetTableAmRoutine(Oid amhandler);
2098 : extern const TableAmRoutine *GetHeapamTableAmRoutine(void);
2099 :
2100 : #endif /* TABLEAM_H */