/*-------------------------------------------------------------------------
 *
 * costsize.c
 *      Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in arbitrary units established by these basic
 * parameters:
 *
 *  seq_page_cost           Cost of a sequential page fetch
 *  random_page_cost        Cost of a non-sequential page fetch
 *  cpu_tuple_cost          Cost of typical CPU time to process a tuple
 *  cpu_index_tuple_cost    Cost of typical CPU time to process an index tuple
 *  cpu_operator_cost       Cost of CPU time to execute an operator or function
 *  parallel_tuple_cost     Cost of CPU time to pass a tuple from worker to leader backend
 *  parallel_setup_cost     Cost of setting up shared memory for parallelism
 *
 * We expect that the kernel will typically do some amount of read-ahead
 * optimization; this in conjunction with seek costs means that seq_page_cost
 * is normally considerably less than random_page_cost.  (However, if the
 * database is fully cached in RAM, it is reasonable to set them equal.)
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * seq_page_cost and random_page_cost can also be overridden for an individual
 * tablespace, in case some data is on a fast disk and other data is on a slow
 * disk.  Per-tablespace overrides never apply to temporary work files such as
 * an external sort or a materialize node that overflows work_mem.
 *
 * We compute two separate costs for each path:
 *      total_cost: total estimated cost to fetch all tuples
 *      startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *      actual_cost = startup_cost +
 *          (total_cost - startup_cost) * tuples_to_fetch / path->rows;
 * Note that a base relation's rows count (and, by extension, plan_rows for
 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
 * that this equation works properly.  (Note: while path->rows is never zero
 * for ordinary relations, it is zero for paths for provably-empty relations,
 * so beware of division-by-zero.)  The LIMIT is applied as a top-level
 * plan node.
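 *
 * As a worked illustration (hypothetical numbers, not taken from the code):
 * with startup_cost = 10, total_cost = 110, and path->rows = 1000, fetching
 * only 100 tuples would be estimated at 10 + (110 - 10) * 100 / 1000 = 20.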
 *
 * For largely historical reasons, most of the routines in this module use
 * the passed result Path only to store their results (rows, startup_cost and
 * total_cost) into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the Path.
 * An exception is made for the cost_XXXjoin() routines, which expect all
 * the other fields of the passed XXXPath to be filled in, and similarly
 * cost_index() assumes the passed IndexPath is valid except for its output
 * values.
 *
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *      src/backend/optimizer/path/costsize.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/amapi.h"
#include "access/htup_details.h"
#include "access/tsmapi.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "executor/nodeHash.h"
#include "executor/nodeMemoize.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/placeholder.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "optimizer/restrictinfo.h"
#include "parser/parsetree.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
#include "utils/spccache.h"
#include "utils/tuplesort.h"

#define LOG2(x)  (log(x) / 0.693147180559945)

/*
 * Append and MergeAppend nodes are less expensive than some other operations
 * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
 * per-tuple cost as cpu_tuple_cost multiplied by this value.
 */
#define APPEND_CPU_COST_MULTIPLIER 0.5

/*
 * Maximum value for row estimates.  We cap row estimates to this to help
 * ensure that costs based on these estimates remain within the range of what
 * double can represent.  add_path() wouldn't act sanely given infinite or NaN
 * cost values.
 */
#define MAXIMUM_ROWCOUNT 1e100

double      seq_page_cost = DEFAULT_SEQ_PAGE_COST;
double      random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double      cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double      cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double      cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
double      parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
double      parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
double      recursive_worktable_factor = DEFAULT_RECURSIVE_WORKTABLE_FACTOR;

int         effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;

Cost        disable_cost = 1.0e10;

int         max_parallel_workers_per_gather = 2;

bool        enable_seqscan = true;
bool        enable_indexscan = true;
bool        enable_indexonlyscan = true;
bool        enable_bitmapscan = true;
bool        enable_tidscan = true;
bool        enable_sort = true;
bool        enable_incremental_sort = true;
bool        enable_hashagg = true;
bool        enable_nestloop = true;
bool        enable_material = true;
bool        enable_memoize = true;
bool        enable_mergejoin = true;
bool        enable_hashjoin = true;
bool        enable_gathermerge = true;
bool        enable_partitionwise_join = false;
bool        enable_partitionwise_aggregate = false;
bool        enable_parallel_append = true;
bool        enable_parallel_hash = true;
bool        enable_partition_pruning = true;
bool        enable_presorted_aggregate = true;
bool        enable_async_append = true;

typedef struct
{
    PlannerInfo *root;
    QualCost    total;
} cost_qual_eval_context;

static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
static MergeScanSelCache *cached_scansel(PlannerInfo *root,
                                         RestrictInfo *rinfo,
                                         PathKey *pathkey);
static void cost_rescan(PlannerInfo *root, Path *path,
                        Cost *rescan_startup_cost, Cost *rescan_total_cost);
static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
                                      ParamPathInfo *param_info,
                                      QualCost *qpqual_cost);
static bool has_indexed_join_quals(NestPath *path);
static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
                                 List *quals);
static double calc_joinrel_size_estimate(PlannerInfo *root,
                                         RelOptInfo *joinrel,
                                         RelOptInfo *outer_rel,
                                         RelOptInfo *inner_rel,
                                         double outer_rows,
                                         double inner_rows,
                                         SpecialJoinInfo *sjinfo,
                                         List *restrictlist);
static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
                                                    Relids outer_relids,
                                                    Relids inner_relids,
                                                    SpecialJoinInfo *sjinfo,
                                                    List **restrictlist);
static Cost append_nonpartial_cost(List *subpaths, int numpaths,
                                   int parallel_workers);
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
static int32 get_expr_width(PlannerInfo *root, const Node *expr);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
static double get_parallel_divisor(Path *path);

/*
 * clamp_row_est
 *      Force a row-count estimate to a sane value.
 */
double
clamp_row_est(double nrows)
{
    /*
     * Avoid infinite and NaN row estimates.  Costs derived from such values
     * are going to be useless.  Also force the estimate to be at least one
     * row, to make explain output look better and to avoid possible
     * divide-by-zero when interpolating costs.  Make it an integer, too.
     */
    if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
        nrows = MAXIMUM_ROWCOUNT;
    else if (nrows <= 1.0)
        nrows = 1.0;
    else
        nrows = rint(nrows);

    return nrows;
}

/*
 * clamp_cardinality_to_long
 *      Cast a Cardinality value to a sane long value.
 */
long
clamp_cardinality_to_long(Cardinality x)
{
    /*
     * Just for paranoia's sake, ensure we do something sane with negative or
     * NaN values.
     */
    if (isnan(x))
        return LONG_MAX;
    if (x <= 0)
        return 0;

    /*
     * If "long" is 64 bits, then LONG_MAX cannot be represented exactly as a
     * double.  Casting it to double and back may well result in overflow due
     * to rounding, so avoid doing that.  We trust that any double value that
     * compares strictly less than "(double) LONG_MAX" will cast to a
     * representable "long" value.
     */
    return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
}

/*
 * cost_seqscan
 *      Determines and returns the cost of scanning a relation sequentially.
 *
 * 'baserel' is the relation to be scanned
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 */
void
cost_seqscan(Path *path, PlannerInfo *root,
             RelOptInfo *baserel, ParamPathInfo *param_info)
{
    Cost        startup_cost = 0;
    Cost        cpu_run_cost;
    Cost        disk_run_cost;
    double      spc_seq_page_cost;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;

    /* Should only be applied to base relations */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    if (!enable_seqscan)
        startup_cost += disable_cost;

    /* fetch estimated page cost for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              NULL,
                              &spc_seq_page_cost);

    /*
     * disk costs
     */
    disk_run_cost = spc_seq_page_cost * baserel->pages;

    /* CPU costs */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    cpu_run_cost = cpu_per_tuple * baserel->tuples;
    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->pathtarget->cost.startup;
    cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;

    /* Adjust costing for parallelism, if used. */
    if (path->parallel_workers > 0)
    {
        double      parallel_divisor = get_parallel_divisor(path);

        /* The CPU cost is divided among all the workers. */
        cpu_run_cost /= parallel_divisor;

        /*
         * It may be possible to amortize some of the I/O cost, but probably
         * not very much, because most operating systems already do aggressive
         * prefetching.  For now, we assume that the disk run cost can't be
         * amortized at all.
         */

        /*
         * In the case of a parallel plan, the row count needs to represent
         * the number of tuples processed per worker.
         */
        path->rows = clamp_row_est(path->rows / parallel_divisor);
    }

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
}

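/*
 * Worked example of the costing above (hypothetical numbers, assuming the
 * default seq_page_cost = 1.0 and cpu_tuple_cost = 0.01): scanning a
 * 10000-page, 1000000-tuple relation whose quals cost 0.0025 per tuple gives
 * disk_run_cost = 1.0 * 10000 = 10000 and cpu_run_cost = (0.01 + 0.0025) *
 * 1000000 = 12500, for a total_cost of about 22500 plus any tlist costs.
 */
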
/*
 * cost_samplescan
 *      Determines and returns the cost of scanning a relation using sampling.
 *
 * 'baserel' is the relation to be scanned
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 */
void
cost_samplescan(Path *path, PlannerInfo *root,
                RelOptInfo *baserel, ParamPathInfo *param_info)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    RangeTblEntry *rte;
    TableSampleClause *tsc;
    TsmRoutine *tsm;
    double      spc_seq_page_cost,
                spc_random_page_cost,
                spc_page_cost;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;

    /* Should only be applied to base relations with tablesample clauses */
    Assert(baserel->relid > 0);
    rte = planner_rt_fetch(baserel->relid, root);
    Assert(rte->rtekind == RTE_RELATION);
    tsc = rte->tablesample;
    Assert(tsc != NULL);
    tsm = GetTsmRoutine(tsc->tsmhandler);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    /* fetch estimated page cost for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    /* if NextSampleBlock is used, assume random access, else sequential */
    spc_page_cost = (tsm->NextSampleBlock != NULL) ?
        spc_random_page_cost : spc_seq_page_cost;

    /*
     * disk costs (recall that baserel->pages has already been set to the
     * number of pages the sampling method will visit)
     */
    run_cost += spc_page_cost * baserel->pages;

    /*
     * CPU costs (recall that baserel->tuples has already been set to the
     * number of tuples the sampling method will select).  Note that we ignore
     * execution cost of the TABLESAMPLE parameter expressions; they will be
     * evaluated only once per scan, and in most usages they'll likely be
     * simple constants anyway.  We also don't charge anything for the
     * calculations the sampling method might do internally.
     */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    run_cost += cpu_per_tuple * baserel->tuples;
    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}

/*
 * cost_gather
 *      Determines and returns the cost of gather path.
 *
 * 'rel' is the relation to be operated upon
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
 * both 'rel' and 'param_info'.  This is useful when the path doesn't exactly
 * correspond to any particular RelOptInfo.
 */
void
cost_gather(GatherPath *path, PlannerInfo *root,
            RelOptInfo *rel, ParamPathInfo *param_info,
            double *rows)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;

    /* Mark the path with the correct row estimate */
    if (rows)
        path->path.rows = *rows;
    else if (param_info)
        path->path.rows = param_info->ppi_rows;
    else
        path->path.rows = rel->rows;

    startup_cost = path->subpath->startup_cost;

    run_cost = path->subpath->total_cost - path->subpath->startup_cost;

    /* Parallel setup and communication cost. */
    startup_cost += parallel_setup_cost;
    run_cost += parallel_tuple_cost * path->path.rows;

    path->path.startup_cost = startup_cost;
    path->path.total_cost = (startup_cost + run_cost);
}

/*
 * cost_gather_merge
 *      Determines and returns the cost of gather merge path.
 *
 * GatherMerge merges several pre-sorted input streams, using a heap that at
 * any given instant holds the next tuple from each stream.  If there are N
 * streams, we need about N*log2(N) tuple comparisons to construct the heap at
 * startup, and then for each output tuple, about log2(N) comparisons to
 * replace the top heap entry with the next tuple from the same stream.
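 *
 * For example (hypothetical numbers): with two workers plus the leader,
 * N = 3, so building the heap costs about 3 * log2(3) ~= 4.8 comparisons
 * and each output tuple costs about log2(3) ~= 1.6 comparisons.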
 */
void
cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
                  RelOptInfo *rel, ParamPathInfo *param_info,
                  Cost input_startup_cost, Cost input_total_cost,
                  double *rows)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        comparison_cost;
    double      N;
    double      logN;

    /* Mark the path with the correct row estimate */
    if (rows)
        path->path.rows = *rows;
    else if (param_info)
        path->path.rows = param_info->ppi_rows;
    else
        path->path.rows = rel->rows;

    if (!enable_gathermerge)
        startup_cost += disable_cost;

    /*
     * Add one to the number of workers to account for the leader.  This might
     * be overgenerous since the leader will do less work than other workers
     * in typical cases, but we'll go with it for now.
     */
    Assert(path->num_workers > 0);
    N = (double) path->num_workers + 1;
    logN = LOG2(N);

    /* Assumed cost per tuple comparison */
    comparison_cost = 2.0 * cpu_operator_cost;

    /* Heap creation cost */
    startup_cost += comparison_cost * N * logN;

    /* Per-tuple heap maintenance cost */
    run_cost += path->path.rows * comparison_cost * logN;

    /* small cost for heap management, like cost_merge_append */
    run_cost += cpu_operator_cost * path->path.rows;

    /*
     * Parallel setup and communication cost.  Since Gather Merge, unlike
     * Gather, requires us to block until a tuple is available from every
     * worker, we bump the IPC cost up a little bit as compared with Gather.
     * For lack of a better idea, charge an extra 5%.
     */
    startup_cost += parallel_setup_cost;
    run_cost += parallel_tuple_cost * path->path.rows * 1.05;

    path->path.startup_cost = startup_cost + input_startup_cost;
    path->path.total_cost = (startup_cost + run_cost + input_total_cost);
}

/*
 * cost_index
 *      Determines and returns the cost of scanning a relation using an index.
 *
 * 'path' describes the indexscan under consideration, and is complete
 *      except for the fields to be set by this routine
 * 'loop_count' is the number of repetitions of the indexscan to factor into
 *      estimates of caching behavior
 *
 * In addition to rows, startup_cost and total_cost, cost_index() sets the
 * path's indextotalcost and indexselectivity fields.  These values will be
 * needed if the IndexPath is used in a BitmapIndexScan.
 *
 * NOTE: path->indexquals must contain only clauses usable as index
 * restrictions.  Any additional quals evaluated as qpquals may reduce the
 * number of returned tuples, but they won't reduce the number of tuples
 * we have to fetch from the table, so they don't reduce the scan cost.
 */
void
cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
           bool partial_path)
{
    IndexOptInfo *index = path->indexinfo;
    RelOptInfo *baserel = index->rel;
    bool        indexonly = (path->path.pathtype == T_IndexOnlyScan);
    amcostestimate_function amcostestimate;
    List       *qpquals;
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        cpu_run_cost = 0;
    Cost        indexStartupCost;
    Cost        indexTotalCost;
    Selectivity indexSelectivity;
    double      indexCorrelation,
                csquared;
    double      spc_seq_page_cost,
                spc_random_page_cost;
    Cost        min_IO_cost,
                max_IO_cost;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;
    double      tuples_fetched;
    double      pages_fetched;
    double      rand_heap_pages;
    double      index_pages;

    /* Should only be applied to base relations */
    Assert(IsA(baserel, RelOptInfo) &&
           IsA(index, IndexOptInfo));
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /*
     * Mark the path with the correct row estimate, and identify which quals
     * will need to be enforced as qpquals.  We need not check any quals that
     * are implied by the index's predicate, so we can use indrestrictinfo not
     * baserestrictinfo as the list of relevant restriction clauses for the
     * rel.
     */
    if (path->path.param_info)
    {
        path->path.rows = path->path.param_info->ppi_rows;
        /* qpquals come from the rel's restriction clauses and ppi_clauses */
        qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
                                                          path->indexclauses),
                              extract_nonindex_conditions(path->path.param_info->ppi_clauses,
                                                          path->indexclauses));
    }
    else
    {
        path->path.rows = baserel->rows;
        /* qpquals come from just the rel's restriction clauses */
        qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
                                              path->indexclauses);
    }

    if (!enable_indexscan)
        startup_cost += disable_cost;
    /* we don't need to check enable_indexonlyscan; indxpath.c does that */

    /*
     * Call index-access-method-specific code to estimate the processing cost
     * for scanning the index, as well as the selectivity of the index (ie,
     * the fraction of main-table tuples we will have to retrieve) and its
     * correlation to the main-table tuple order.  We need a cast here because
     * pathnodes.h uses a weak function type to avoid including amapi.h.
     */
    amcostestimate = (amcostestimate_function) index->amcostestimate;
    amcostestimate(root, path, loop_count,
                   &indexStartupCost, &indexTotalCost,
                   &indexSelectivity, &indexCorrelation,
                   &index_pages);

    /*
     * Save amcostestimate's results for possible use in bitmap scan planning.
     * We don't bother to save indexStartupCost or indexCorrelation, because a
     * bitmap scan doesn't care about either.
     */
    path->indextotalcost = indexTotalCost;
    path->indexselectivity = indexSelectivity;

    /* all costs for touching index itself included here */
    startup_cost += indexStartupCost;
    run_cost += indexTotalCost - indexStartupCost;

    /* estimate number of main-table tuples fetched */
    tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

    /* fetch estimated page costs for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    /*----------
     * Estimate number of main-table pages fetched, and compute I/O cost.
     *
     * When the index ordering is uncorrelated with the table ordering,
     * we use an approximation proposed by Mackert and Lohman (see
     * index_pages_fetched() for details) to compute the number of pages
     * fetched, and then charge spc_random_page_cost per page fetched.
     *
     * When the index ordering is exactly correlated with the table ordering
     * (just after a CLUSTER, for example), the number of pages fetched should
     * be exactly selectivity * table_size.  What's more, all but the first
     * will be sequential fetches, not the random fetches that occur in the
     * uncorrelated case.  So if the number of pages is more than 1, we
     * ought to charge
     *      spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
     * For partially-correlated indexes, we ought to charge somewhere between
     * these two estimates.  We currently interpolate linearly between the
     * estimates based on the correlation squared (XXX is that appropriate?).
     *
     * If it's an index-only scan, then we will not need to fetch any heap
     * pages for which the visibility map shows all tuples are visible.
     * Hence, reduce the estimated number of heap fetches accordingly.
     * We use the measured fraction of the entire heap that is all-visible,
     * which might not be particularly relevant to the subset of the heap
     * that this query will fetch; but it's not clear how to do better.
     *----------
     */
    if (loop_count > 1)
    {
        /*
         * For repeated indexscans, the appropriate estimate for the
         * uncorrelated case is to scale up the number of tuples fetched in
         * the Mackert and Lohman formula by the number of scans, so that we
         * estimate the number of pages fetched by all the scans; then
         * pro-rate the costs for one scan.  In this case we assume all the
         * fetches are random accesses.
         */
        pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        rand_heap_pages = pages_fetched;

        max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;

        /*
         * In the perfectly correlated case, the number of pages touched by
         * each scan is selectivity * table_size, and we can use the Mackert
         * and Lohman formula at the page level to estimate how much work is
         * saved by caching across scans.  We still assume all the fetches are
         * random, though, which is an overestimate that's hard to correct for
         * without double-counting the cache effects.  (But in most cases
         * where such a plan is actually interesting, only one page would get
         * fetched per scan anyway, so it shouldn't matter much.)
         */
        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

        pages_fetched = index_pages_fetched(pages_fetched * loop_count,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
    }
    else
    {
        /*
         * Normal case: apply the Mackert and Lohman formula, and then
         * interpolate between that and the correlation-derived result.
         */
        pages_fetched = index_pages_fetched(tuples_fetched,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        rand_heap_pages = pages_fetched;

        /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
        max_IO_cost = pages_fetched * spc_random_page_cost;

        /* min_IO_cost is for the perfectly correlated case (csquared=1) */
        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        if (pages_fetched > 0)
        {
            min_IO_cost = spc_random_page_cost;
            if (pages_fetched > 1)
                min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
        }
        else
            min_IO_cost = 0;
    }

    if (partial_path)
    {
        /*
         * For index only scans compute workers based on number of index pages
         * fetched; the number of heap pages we fetch might be so small as to
         * effectively rule out parallelism, which we don't want to do.
         */
        if (indexonly)
            rand_heap_pages = -1;

        /*
         * Estimate the number of parallel workers required to scan index. Use
         * the number of heap pages computed considering heap fetches won't be
         * sequential as for parallel scans the pages are accessed in random
         * order.
         */
        path->path.parallel_workers = compute_parallel_worker(baserel,
                                                              rand_heap_pages,
                                                              index_pages,
                                                              max_parallel_workers_per_gather);

        /*
         * Fall out if workers can't be assigned for parallel scan, because in
         * such a case this path will be rejected.  So there is no benefit in
         * doing extra computation.
         */
        if (path->path.parallel_workers <= 0)
            return;

        path->path.parallel_aware = true;
    }

    /*
     * Now interpolate based on estimated index order correlation to get total
     * disk I/O cost for main table accesses.
     */
    csquared = indexCorrelation * indexCorrelation;

    run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);

    /*
     * Estimate CPU costs per tuple.
     *
     * What we want here is cpu_tuple_cost plus the evaluation costs of any
     * qual clauses that we have to evaluate as qpquals.
     */
    cost_qual_eval(&qpqual_cost, qpquals, root);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;

    cpu_run_cost += cpu_per_tuple * tuples_fetched;

    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->path.pathtarget->cost.startup;
    cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;

    /* Adjust costing for parallelism, if used. */
    if (path->path.parallel_workers > 0)
    {
        double      parallel_divisor = get_parallel_divisor(&path->path);

        path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);

        /* The CPU cost is divided among all the workers. */
        cpu_run_cost /= parallel_divisor;
    }

    run_cost += cpu_run_cost;

    path->path.startup_cost = startup_cost;
    path->path.total_cost = startup_cost + run_cost;
}

/*
 * extract_nonindex_conditions
 *
 * Given a list of quals to be enforced in an indexscan, extract the ones that
 * will have to be applied as qpquals (ie, the index machinery won't handle
 * them).  Here we detect only whether a qual clause is directly redundant
 * with some indexclause.  If the index path is chosen for use, createplan.c
 * will try a bit harder to get rid of redundant qual conditions; specifically
 * it will see if quals can be proven to be implied by the indexquals.  But
 * it does not seem worth the cycles to try to factor that in at this stage,
 * since we're only trying to estimate qual eval costs.  Otherwise this must
 * match the logic in create_indexscan_plan().
 *
 * qual_clauses, and the result, are lists of RestrictInfos.
 * indexclauses is a list of IndexClauses.
 */
static List *
extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
{
    List       *result = NIL;
    ListCell   *lc;

    foreach(lc, qual_clauses)
    {
        RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);

        if (rinfo->pseudoconstant)
            continue;           /* we may drop pseudoconstants here */
        if (is_redundant_with_indexclauses(rinfo, indexclauses))
            continue;           /* dup or derived from same EquivalenceClass */
        /* ... skip the predicate proof attempt createplan.c will try ... */
        result = lappend(result, rinfo);
    }
    return result;
}

/*
 * index_pages_fetched
 *      Estimate the number of pages actually fetched after accounting for
 *      cache effects.
 *
 * We use an approximation proposed by Mackert and Lohman, "Index Scans
 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
 * The Mackert and Lohman approximation is that the number of pages
 * fetched is
 *  PF =
 *      min(2TNs/(2T+Ns), T) when T <= b
 *      2TNs/(2T+Ns) when T > b and Ns <= 2Tb/(2T-b)
 *      b + (Ns - 2Tb/(2T-b))*(T-b)/T when T > b and Ns > 2Tb/(2T-b)
 * where
 *      T = # pages in table
 *      N = # tuples in table
 *      s = selectivity = fraction of table to be scanned
 *      b = # buffer pages available (we include kernel space here)
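 *
 * For example (hypothetical numbers): with T = 1000 pages, b = 2000 buffer
 * pages, and Ns = 500 tuples fetched, the first case applies (T <= b), so
 * PF = min(2*1000*500 / (2*1000 + 500), 1000) = min(400, 1000) = 400 pages.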
 *
 * We assume that effective_cache_size is the total number of buffer pages
 * available for the whole query, and pro-rate that space across all the
 * tables in the query and the index currently under consideration.  (This
 * ignores space needed for other indexes used by the query, but since we
 * don't know which indexes will get used, we can't estimate that very well;
 * and in any case counting all the tables may well be an overestimate, since
 * depending on the join plan not all the tables may be scanned concurrently.)
 *
 * The product Ns is the number of tuples fetched; we pass in that
 * product rather than calculating it here.  "pages" is the number of pages
 * in the object under consideration (either an index or a table).
 * "index_pages" is the amount to add to the total table space, which was
 * computed for us by make_one_rel.
 *
 * Caller is expected to have ensured that tuples_fetched is greater than zero
 * and rounded to integer (see clamp_row_est).  The result will likewise be
 * greater than zero and integral.
 */
double
index_pages_fetched(double tuples_fetched, BlockNumber pages,
                    double index_pages, PlannerInfo *root)
{
    double      pages_fetched;
    double      total_pages;
    double      T,
                b;

    /* T is # pages in table, but don't allow it to be zero */
    T = (pages > 1) ? (double) pages : 1.0;

    /* Compute number of pages assumed to be competing for cache space */
    total_pages = root->total_table_pages + index_pages;
    total_pages = Max(total_pages, 1.0);
    Assert(T <= total_pages);

    /* b is pro-rated share of effective_cache_size */
    b = (double) effective_cache_size * T / total_pages;

    /* force it positive and integral */
    if (b <= 1.0)
        b = 1.0;
    else
        b = ceil(b);

    /* This part is the Mackert and Lohman formula */
    if (T <= b)
    {
        pages_fetched =
            (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        if (pages_fetched >= T)
            pages_fetched = T;
        else
            pages_fetched = ceil(pages_fetched);
    }
    else
    {
        double      lim;

        lim = (2.0 * T * b) / (2.0 * T - b);
        if (tuples_fetched <= lim)
        {
            pages_fetched =
                (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        }
        else
        {
            pages_fetched =
                b + (tuples_fetched - lim) * (T - b) / T;
        }
        pages_fetched = ceil(pages_fetched);
    }
    return pages_fetched;
}

/*
 * get_indexpath_pages
 *      Determine the total size of the indexes used in a bitmap index path.
 *
 * Note: if the same index is used more than once in a bitmap tree, we will
 * count it multiple times, which perhaps is the wrong thing ... but it's
 * not completely clear, and detecting duplicates is difficult, so ignore it
 * for now.
 */
static double
get_indexpath_pages(Path *bitmapqual)
{
    double      result = 0;
    ListCell   *l;

    if (IsA(bitmapqual, BitmapAndPath))
    {
        BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;

        foreach(l, apath->bitmapquals)
        {
            result += get_indexpath_pages((Path *) lfirst(l));
        }
    }
    else if (IsA(bitmapqual, BitmapOrPath))
    {
        BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;

        foreach(l, opath->bitmapquals)
        {
            result += get_indexpath_pages((Path *) lfirst(l));
        }
    }
    else if (IsA(bitmapqual, IndexPath))
    {
        IndexPath  *ipath = (IndexPath *) bitmapqual;

        result = (double) ipath->indexinfo->pages;
    }
    else
        elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));

    return result;
}

/*
 * cost_bitmap_heap_scan
 *      Determines and returns the cost of scanning a relation using a bitmap
 *      index-then-heap plan.
 *
 * 'baserel' is the relation to be scanned
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
 * 'loop_count' is the number of repetitions of the indexscan to factor into
 *      estimates of caching behavior
 *
 * Note: the component IndexPaths in bitmapqual should have been costed
 * using the same loop_count.
 */
void
cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
                      ParamPathInfo *param_info,
                      Path *bitmapqual, double loop_count)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        indexTotalCost;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;
    Cost        cost_per_page;
    Cost        cpu_run_cost;
    double      tuples_fetched;
    double      pages_fetched;
    double      spc_seq_page_cost,
                spc_random_page_cost;
    double      T;

    /* Should only be applied to base relations */
    Assert(IsA(baserel, RelOptInfo));
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    if (!enable_bitmapscan)
        startup_cost += disable_cost;

    pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
                                         loop_count, &indexTotalCost,
                                         &tuples_fetched);

    startup_cost += indexTotalCost;
    T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;

    /* Fetch estimated page costs for tablespace containing table. */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    /*
     * For small numbers of pages we should charge spc_random_page_cost
     * apiece, while if nearly all the table's pages are being read, it's more
     * appropriate to charge spc_seq_page_cost apiece.  The effect is
     * nonlinear, too.  For lack of a better idea, interpolate like this to
     * determine the cost per page.
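     *
     * For example (hypothetical numbers): with spc_random_page_cost = 4.0,
     * spc_seq_page_cost = 1.0, and a quarter of a 1000-page table fetched
     * (pages_fetched = 250, T = 1000), sqrt(250/1000) = 0.5 and
     * cost_per_page = 4.0 - (4.0 - 1.0) * 0.5 = 2.5.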
     */
    if (pages_fetched >= 2.0)
        cost_per_page = spc_random_page_cost -
            (spc_random_page_cost - spc_seq_page_cost)
            * sqrt(pages_fetched / T);
    else
        cost_per_page = spc_random_page_cost;

    run_cost += pages_fetched * cost_per_page;

    /*
     * Estimate CPU costs per tuple.
     *
     * Often the indexquals don't need to be rechecked at each tuple ... but
     * not always, especially not if there are enough tuples involved that the
     * bitmaps become lossy.  For the moment, just assume they will be
     * rechecked always.  This means we charge the full freight for all the
     * scan clauses.
     */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
    cpu_run_cost = cpu_per_tuple * tuples_fetched;

    /* Adjust costing for parallelism, if used. */
    if (path->parallel_workers > 0)
    {
        double      parallel_divisor = get_parallel_divisor(path);

        /* The CPU cost is divided among all the workers. */
        cpu_run_cost /= parallel_divisor;

        path->rows = clamp_row_est(path->rows / parallel_divisor);
    }


    run_cost += cpu_run_cost;

    /* tlist eval costs are paid per output row, not per tuple scanned */
    startup_cost += path->pathtarget->cost.startup;
    run_cost += path->pathtarget->cost.per_tuple * path->rows;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}

/*
 * cost_bitmap_tree_node
 *      Extract cost and selectivity from a bitmap tree node (index/and/or)
 */
void
cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
{
    if (IsA(path, IndexPath))
    {
        *cost = ((IndexPath *) path)->indextotalcost;
        *selec = ((IndexPath *) path)->indexselectivity;

        /*
         * Charge a small amount per retrieved tuple to reflect the costs of
         * manipulating the bitmap.  This is mostly to make sure that a bitmap
         * scan doesn't look to be the same cost as an indexscan to retrieve a
         * single tuple.
         */
        *cost += 0.1 * cpu_operator_cost * path->rows;
    }
    else if (IsA(path, BitmapAndPath))
    {
        *cost = path->total_cost;
        *selec = ((BitmapAndPath *) path)->bitmapselectivity;
    }
    else if (IsA(path, BitmapOrPath))
    {
        *cost = path->total_cost;
        *selec = ((BitmapOrPath *) path)->bitmapselectivity;
    }
    else
    {
        elog(ERROR, "unrecognized node type: %d", nodeTag(path));
        *cost = *selec = 0;     /* keep compiler quiet */
    }
}

/*
 * cost_bitmap_and_node
 *      Estimate the cost of a BitmapAnd node
 *
 * Note that this considers only the costs of index scanning and bitmap
 * creation, not the eventual heap access.  In that sense the object isn't
 * truly a Path, but it has enough path-like properties (costs in particular)
 * to warrant treating it as one.  We don't bother to set the path rows field,
 * however.
 */
void
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
    Cost        totalCost;
    Selectivity selec;
    ListCell   *l;

    /*
     * We estimate AND selectivity on the assumption that the inputs are
     * independent.  This is probably often wrong, but we don't have the info
     * to do better.
     *
     * The runtime cost of the BitmapAnd itself is estimated at 100x
     * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
     * definitely too simplistic?
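     *
     * For example (hypothetical numbers): under the independence assumption,
     * two inputs with selectivities 0.1 and 0.2 combine to 0.1 * 0.2 = 0.02.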
1143 : */
6562 tgl 1144 GIC 12900 : totalCost = 0.0;
1145 12900 : selec = 1.0;
6562 tgl 1146 CBC 38700 : foreach(l, path->bitmapquals)
6562 tgl 1147 ECB : {
6385 bruce 1148 CBC 25800 : Path *subpath = (Path *) lfirst(l);
1149 : Cost subCost;
6562 tgl 1150 ECB : Selectivity subselec;
1151 :
6562 tgl 1152 GIC 25800 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1153 :
6562 tgl 1154 CBC 25800 : selec *= subselec;
1155 :
1156 25800 : totalCost += subCost;
6562 tgl 1157 GIC 25800 : if (l != list_head(path->bitmapquals))
6562 tgl 1158 CBC 12900 : totalCost += 100.0 * cpu_operator_cost;
6562 tgl 1159 ECB : }
6562 tgl 1160 CBC 12900 : path->bitmapselectivity = selec;
4090 tgl 1161 GIC 12900 : path->path.rows = 0; /* per above, not used */
6562 tgl 1162 CBC 12900 : path->path.startup_cost = totalCost;
1163 12900 : path->path.total_cost = totalCost;
1164 12900 : }
6562 tgl 1165 ECB :
1166 : /*
1167 : * cost_bitmap_or_node
1168 : * Estimate the cost of a BitmapOr node
1169 : *
1170 : * See comments for cost_bitmap_and_node.
1171 : */
1172 : void
6517 tgl 1173 GIC 356 : cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1174 : {
6562 tgl 1175 ECB : Cost totalCost;
1176 : Selectivity selec;
1177 : ListCell *l;
1178 :
1179 : /*
1180 : * We estimate OR selectivity on the assumption that the inputs are
1181 : * non-overlapping, since that's often the case in "x IN (list)" type
1182 : * situations. Of course, we clamp to 1.0 at the end.
1183 : *
1184 : * The runtime cost of the BitmapOr itself is estimated at 100x
1185 : * cpu_operator_cost for each tbm_union needed. Probably too small,
1186 : * definitely too simplistic? We are aware that the tbm_unions are
1187 : * optimized out when the inputs are BitmapIndexScans.
1188 : */
6562 tgl 1189 GIC 356 : totalCost = 0.0;
1190 356 : selec = 0.0;
6562 tgl 1191 CBC 1098 : foreach(l, path->bitmapquals)
6562 tgl 1192 ECB : {
6385 bruce 1193 CBC 742 : Path *subpath = (Path *) lfirst(l);
1194 : Cost subCost;
6562 tgl 1195 ECB : Selectivity subselec;
1196 :
6562 tgl 1197 GIC 742 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1198 :
6562 tgl 1199 CBC 742 : selec += subselec;
1200 :
1201 742 : totalCost += subCost;
6562 tgl 1202 GIC 742 : if (l != list_head(path->bitmapquals) &&
6562 tgl 1203 CBC 386 : !IsA(subpath, IndexPath))
1204 15 : totalCost += 100.0 * cpu_operator_cost;
6562 tgl 1205 ECB : }
6562 tgl 1206 CBC 356 : path->bitmapselectivity = Min(selec, 1.0);
4090 tgl 1207 GIC 356 : path->path.rows = 0; /* per above, not used */
6562 tgl 1208 CBC 356 : path->path.startup_cost = totalCost;
1209 356 : path->path.total_cost = totalCost;
1210 356 : }
6562 tgl 1211 ECB :
8538 bruce 1212 : /*
1213 : * cost_tidscan
1214 : * Determines and returns the cost of scanning a relation using TIDs.
1215 : *
1216 : * 'baserel' is the relation to be scanned
1217 : * 'tidquals' is the list of TID-checkable quals
1218 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1219 : */
1220 : void
6517 tgl 1221 GIC 378 : cost_tidscan(Path *path, PlannerInfo *root,
1222 : RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
8538 bruce 1223 ECB : {
8454 tgl 1224 GIC 378 : Cost startup_cost = 0;
1225 378 : Cost run_cost = 0;
5646 tgl 1226 CBC 378 : bool isCurrentOf = false;
3878 tgl 1227 ECB : QualCost qpqual_cost;
8454 1228 : Cost cpu_per_tuple;
1229 : QualCost tid_qual_cost;
1230 : int ntuples;
1231 : ListCell *l;
1232 : double spc_random_page_cost;
1233 :
1234 : /* Should only be applied to base relations */
7365 tgl 1235 GIC 378 : Assert(baserel->relid > 0);
7637 1236 378 : Assert(baserel->rtekind == RTE_RELATION);
7637 tgl 1237 ECB :
3878 1238 : /* Mark the path with the correct row estimate */
3878 tgl 1239 GIC 378 : if (param_info)
1240 72 : path->rows = param_info->ppi_rows;
3878 tgl 1241 ECB : else
3878 tgl 1242 CBC 306 : path->rows = baserel->rows;
1243 :
6343 tgl 1244 ECB : /* Count how many tuples we expect to retrieve */
6343 tgl 1245 GIC 378 : ntuples = 0;
1246 768 : foreach(l, tidquals)
6343 tgl 1247 ECB : {
1561 tgl 1248 CBC 390 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
1561 tgl 1249 GIC 390 : Expr *qual = rinfo->clause;
1561 tgl 1250 ECB :
1561 tgl 1251 CBC 390 : if (IsA(qual, ScalarArrayOpExpr))
1252 : {
6343 tgl 1253 ECB : /* Each element of the array yields 1 tuple */
1561 tgl 1254 GIC 15 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
6031 bruce 1255 15 : Node *arraynode = (Node *) lsecond(saop->args);
6343 tgl 1256 ECB :
6343 tgl 1257 CBC 15 : ntuples += estimate_array_length(arraynode);
1258 : }
1561 1259 375 : else if (IsA(qual, CurrentOfExpr))
1260 : {
5646 tgl 1261 ECB : /* CURRENT OF yields 1 tuple */
5646 tgl 1262 GIC 196 : isCurrentOf = true;
1263 196 : ntuples++;
5646 tgl 1264 ECB : }
6343 1265 : else
1266 : {
1267 : /* It's just CTID = something, count 1 tuple */
6343 tgl 1268 GIC 179 : ntuples++;
1269 : }
6343 tgl 1270 ECB : }
1271 :
1272 : /*
1273 : * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1274 : * understands how to do it correctly. Therefore, honor enable_tidscan
1275 : * only when CURRENT OF isn't present. Also note that cost_qual_eval
1276 : * counts a CurrentOfExpr as having startup cost disable_cost, which we
1277 : * subtract off here; that's to prevent other plan types such as seqscan
1278 : * from winning.
1279 : */
5646 tgl 1280 GIC 378 : if (isCurrentOf)
1281 : {
5646 tgl 1282 CBC 196 : Assert(baserel->baserestrictcost.startup >= disable_cost);
5646 tgl 1283 GIC 196 : startup_cost -= disable_cost;
5646 tgl 1284 ECB : }
5646 tgl 1285 CBC 182 : else if (!enable_tidscan)
5646 tgl 1286 UIC 0 : startup_cost += disable_cost;
5646 tgl 1287 ECB :
5781 tgl 1288 EUB : /*
1289 : * The TID qual expressions will be computed once, any other baserestrict
1290 : * quals once per retrieved tuple.
1291 : */
5781 tgl 1292 GIC 378 : cost_qual_eval(&tid_qual_cost, tidquals, root);
1293 :
4842 rhaas 1294 ECB : /* fetch estimated page cost for tablespace containing table */
4842 rhaas 1295 GIC 378 : get_tablespace_page_costs(baserel->reltablespace,
1296 : &spc_random_page_cost,
4842 rhaas 1297 ECB : NULL);
1298 :
1299 : /* disk costs --- assume each tuple on a different page */
4842 rhaas 1300 GIC 378 : run_cost += spc_random_page_cost * ntuples;
1301 :
3878 tgl 1302 ECB : /* Add scanning CPU costs */
3878 tgl 1303 GIC 378 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1304 :
3878 tgl 1305 ECB : /* XXX currently we assume TID quals are a subset of qpquals */
3878 tgl 1306 GIC 378 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1307 378 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
5781 tgl 1308 CBC 378 : tid_qual_cost.per_tuple;
8454 1309 378 : run_cost += cpu_per_tuple * ntuples;
8454 tgl 1310 ECB :
2607 1311 : /* tlist eval costs are paid per output row, not per tuple scanned */
2607 tgl 1312 GIC 378 : startup_cost += path->pathtarget->cost.startup;
1313 378 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
2607 tgl 1314 ECB :
8454 tgl 1315 CBC 378 : path->startup_cost = startup_cost;
8454 tgl 1316 GIC 378 : path->total_cost = startup_cost + run_cost;
8538 bruce 1317 CBC 378 : }
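/*
 * Worked example for cost_tidscan, with hypothetical numbers and assuming
 * the default random_page_cost = 4.0 and cpu_tuple_cost = 0.01 with no
 * per-tablespace override, ignoring qual and tlist evaluation: a qual list
 * such as "ctid = '(0,1)' OR ctid = ANY(<4-element array>)" yields
 * ntuples = 1 + 4 = 5, so the disk cost is 4.0 * 5 = 20.0 and the scanning
 * CPU charge adds about 0.01 * 5 = 0.05, for a total near 20.05.
 */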
8397 bruce 1318 ECB :
771 drowley 1319 : /*
1320 : * cost_tidrangescan
1321 : * Determines and sets the costs of scanning a relation using a range of
1322 : * TIDs for 'path'
1323 : *
1324 : * 'baserel' is the relation to be scanned
1325 : * 'tidrangequals' is the list of TID-checkable range quals
1326 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1327 : */
1328 : void
771 drowley 1329 GIC 101 : cost_tidrangescan(Path *path, PlannerInfo *root,
1330 : RelOptInfo *baserel, List *tidrangequals,
771 drowley 1331 ECB : ParamPathInfo *param_info)
1332 : {
1333 : Selectivity selectivity;
1334 : double pages;
771 drowley 1335 GIC 101 : Cost startup_cost = 0;
1336 101 : Cost run_cost = 0;
771 drowley 1337 ECB : QualCost qpqual_cost;
1338 : Cost cpu_per_tuple;
1339 : QualCost tid_qual_cost;
1340 : double ntuples;
1341 : double nseqpages;
1342 : double spc_random_page_cost;
1343 : double spc_seq_page_cost;
1344 :
1345 : /* Should only be applied to base relations */
771 drowley 1346 GIC 101 : Assert(baserel->relid > 0);
1347 101 : Assert(baserel->rtekind == RTE_RELATION);
771 drowley 1348 ECB :
1349 : /* Mark the path with the correct row estimate */
771 drowley 1350 GIC 101 : if (param_info)
771 drowley 1351 UIC 0 : path->rows = param_info->ppi_rows;
771 drowley 1352 ECB : else
771 drowley 1353 GBC 101 : path->rows = baserel->rows;
1354 :
771 drowley 1355 ECB : /* Count how many tuples and pages we expect to scan */
771 drowley 1356 GIC 101 : selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
1357 : JOIN_INNER, NULL);
771 drowley 1358 CBC 101 : pages = ceil(selectivity * baserel->pages);
1359 :
1360 101 : if (pages <= 0.0)
771 drowley 1361 GIC 21 : pages = 1.0;
771 drowley 1362 ECB :
1363 : /*
1364 : * The first page in a range requires a random seek, but each subsequent
1365 : * page is just a normal sequential page read. NOTE: it's desirable for
1366 : * TID Range Scans to cost more than the equivalent Sequential Scans,
1367 : * because Seq Scans have some performance advantages such as scan
1368 : * synchronization and parallelizability, and we'd prefer one of them to
1369 : * be picked unless a TID Range Scan really is better.
1370 : */
771 drowley 1371 GIC 101 : ntuples = selectivity * baserel->tuples;
1372 101 : nseqpages = pages - 1.0;
771 drowley 1373 ECB :
771 drowley 1374 CBC 101 : if (!enable_tidscan)
771 drowley 1375 UIC 0 : startup_cost += disable_cost;
771 drowley 1376 ECB :
771 drowley 1377 EUB : /*
1378 : * The TID qual expressions will be computed once, any other baserestrict
1379 : * quals once per retrieved tuple.
1380 : */
771 drowley 1381 GIC 101 : cost_qual_eval(&tid_qual_cost, tidrangequals, root);
1382 :
771 drowley 1383 ECB : /* fetch estimated page cost for tablespace containing table */
771 drowley 1384 GIC 101 : get_tablespace_page_costs(baserel->reltablespace,
1385 : &spc_random_page_cost,
771 drowley 1386 ECB : &spc_seq_page_cost);
1387 :
1388 : /* disk costs; 1 random page and the remainder as seq pages */
771 drowley 1389 GIC 101 : run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
1390 :
771 drowley 1391 ECB : /* Add scanning CPU costs */
771 drowley 1392 GIC 101 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1393 :
771 drowley 1394 ECB : /*
1395 : * XXX currently we assume TID quals are a subset of qpquals at this
1396 : * point; they will be removed (if possible) when we create the plan, so
1397 : * we subtract their cost from the total qpqual cost. (If the TID quals
1398 : * can't be removed, this is a mistake and we're going to underestimate
1399 : * the CPU cost a bit.)
1400 : */
771 drowley 1401 GIC 101 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1402 101 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
771 drowley 1403 CBC 101 : tid_qual_cost.per_tuple;
1404 101 : run_cost += cpu_per_tuple * ntuples;
771 drowley 1405 ECB :
1406 : /* tlist eval costs are paid per output row, not per tuple scanned */
771 drowley 1407 GIC 101 : startup_cost += path->pathtarget->cost.startup;
1408 101 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
771 drowley 1409 ECB :
771 drowley 1410 CBC 101 : path->startup_cost = startup_cost;
771 drowley 1411 GIC 101 : path->total_cost = startup_cost + run_cost;
771 drowley 1412 CBC 101 : }
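/*
 * Worked example for cost_tidrangescan, with hypothetical numbers: if the
 * TID range quals select 10% of a 500-page, 10000-tuple table, then
 * pages = ceil(0.1 * 500) = 50 and ntuples = 1000.  At the default page
 * costs that gives disk costs of 4.0 + 1.0 * 49 = 53.0 (one random page
 * plus 49 sequential pages), and the per-tuple CPU charge adds about
 * 0.01 * 1000 = 10.0 before qual and tlist costs.
 */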
771 drowley 1413 ECB :
7209 tgl 1414 : /*
1415 : * cost_subqueryscan
1416 : * Determines and returns the cost of scanning a subquery RTE.
1417 : *
1418 : * 'baserel' is the relation to be scanned
1419 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1420 : * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1421 : */
1422 : void
2589 tgl 1423 GIC 10523 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1424 : RelOptInfo *baserel, ParamPathInfo *param_info,
1425 : bool trivial_pathtarget)
1426 : {
7209 tgl 1427 ECB : Cost startup_cost;
1428 : Cost run_cost;
1429 : List *qpquals;
1430 : QualCost qpqual_cost;
1431 : Cost cpu_per_tuple;
1432 :
1433 : /* Should only be applied to base relations that are subqueries */
7209 tgl 1434 GIC 10523 : Assert(baserel->relid > 0);
1435 10523 : Assert(baserel->rtekind == RTE_SUBQUERY);
1436 :
1437 : /*
340 tgl 1438 ECB : * We compute the rowcount estimate as the subplan's estimate times the
1439 : * selectivity of relevant restriction clauses. In simple cases this will
1440 : * come out the same as baserel->rows; but when dealing with parallelized
1441 : * paths we must do it like this to get the right answer.
1442 : */
4007 tgl 1443 GIC 10523 : if (param_info)
340 1444 210 : qpquals = list_concat_copy(param_info->ppi_clauses,
1445 210 : baserel->baserestrictinfo);
1446 : else
340 tgl 1447 CBC 10313 : qpquals = baserel->baserestrictinfo;
340 tgl 1448 ECB :
340 tgl 1449 CBC 10523 : path->path.rows = clamp_row_est(path->subpath->rows *
340 tgl 1450 GIC 10523 : clauselist_selectivity(root,
340 tgl 1451 ECB : qpquals,
1452 : 0,
1453 : JOIN_INNER,
1454 : NULL));
1455 :
1456 : /*
1457 : * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1458 : * any restriction clauses and tlist that will be attached to the
1459 : * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1460 : * projection overhead.
1461 : */
2589 tgl 1462 GIC 10523 : path->path.startup_cost = path->subpath->startup_cost;
1463 10523 : path->path.total_cost = path->subpath->total_cost;
1464 :
1465 : /*
1466 : * However, if there are no relevant restriction clauses and the
1467 : * pathtarget is trivial, then we expect that setrefs.c will optimize away
1468 : * the SubqueryScan plan node altogether, so we should just make its cost
1469 : * and rowcount equal to the input path's.
1470 : *
1471 : * Note: there are some edge cases where createplan.c will apply a
1472 : * different targetlist to the SubqueryScan node, thus falsifying our
1473 : * current estimate of whether the target is trivial, and making the cost
1474 : * estimate (though not the rowcount) wrong. It does not seem worth the
1475 : * extra complication to try to account for that exactly, especially since
1476 : * that behavior falsifies other cost estimates as well.
1477 : */
264 tgl 1478 GNC 10523 : if (qpquals == NIL && trivial_pathtarget)
1479 5547 : return;
1480 :
4007 tgl 1481 GIC 4976 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
4007 tgl 1482 ECB :
4007 tgl 1483 CBC 4976 : startup_cost = qpqual_cost.startup;
4007 tgl 1484 GIC 4976 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
340 1485 4976 : run_cost = cpu_per_tuple * path->subpath->rows;
1486 :
1487 : /* tlist eval costs are paid per output row, not per tuple scanned */
2589 1488 4976 : startup_cost += path->path.pathtarget->cost.startup;
1489 4976 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1490 :
1491 4976 : path->path.startup_cost += startup_cost;
1492 4976 : path->path.total_cost += startup_cost + run_cost;
1493 : }
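/*
 * Illustrative example for cost_subqueryscan, with hypothetical numbers:
 * if the subpath is expected to emit 1000 rows and the attached quals have
 * selectivity 0.25, the path is marked with clamp_row_est(1000 * 0.25) =
 * 250 rows.  With no quals and a trivial pathtarget, the early return
 * above leaves the costs exactly equal to the subpath's, anticipating
 * that setrefs.c will drop the SubqueryScan node altogether.
 */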
1494 :
1495 : /*
1496 : * cost_functionscan
1497 : * Determines and returns the cost of scanning a function RTE.
3897 tgl 1498 ECB : *
1499 : * 'baserel' is the relation to be scanned
1500 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
7637 1501 : */
1502 : void
3897 tgl 1503 CBC 17699 : cost_functionscan(Path *path, PlannerInfo *root,
3897 tgl 1504 ECB : RelOptInfo *baserel, ParamPathInfo *param_info)
7637 1505 : {
7637 tgl 1506 GIC 17699 : Cost startup_cost = 0;
1507 17699 : Cost run_cost = 0;
3897 tgl 1508 ECB : QualCost qpqual_cost;
7637 1509 : Cost cpu_per_tuple;
1510 : RangeTblEntry *rte;
5921 1511 : QualCost exprcost;
7637 1512 :
1513 : /* Should only be applied to base relations that are functions */
7365 tgl 1514 GIC 17699 : Assert(baserel->relid > 0);
5832 1515 17699 : rte = planner_rt_fetch(baserel->relid, root);
5921 1516 17699 : Assert(rte->rtekind == RTE_FUNCTION);
1517 :
1518 : /* Mark the path with the correct row estimate */
3897 1519 17699 : if (param_info)
1520 3086 : path->rows = param_info->ppi_rows;
1521 : else
1522 14613 : path->rows = baserel->rows;
4090 tgl 1523 ECB :
1524 : /*
1525 : * Estimate costs of executing the function expression(s).
4957 1526 : *
3426 1527 : * Currently, nodeFunctionscan.c always executes the functions to
1528 : * completion before returning any rows, and caches the results in a
1529 : * tuplestore. So the function eval cost is all startup cost, and per-row
1530 : * costs are minimal.
1531 : *
1532 : * XXX in principle we ought to charge tuplestore spill costs if the
1533 : * number of rows is large. However, given how phony our rowcount
4790 bruce 1534 : * estimates for functions tend to be, there's not a lot of point in that
1535 : * refinement right now.
4957 tgl 1536 : */
3426 tgl 1537 GIC 17699 : cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1538 :
4957 tgl 1539 CBC 17699 : startup_cost += exprcost.startup + exprcost.per_tuple;
7637 tgl 1540 ECB :
1541 : /* Add scanning CPU costs */
3897 tgl 1542 CBC 17699 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1543 :
3897 tgl 1544 GIC 17699 : startup_cost += qpqual_cost.startup;
1545 17699 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
7637 1546 17699 : run_cost += cpu_per_tuple * baserel->tuples;
1547 :
1548 : /* tlist eval costs are paid per output row, not per tuple scanned */
2607 1549 17699 : startup_cost += path->pathtarget->cost.startup;
1550 17699 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1551 :
7637 1552 17699 : path->startup_cost = startup_cost;
1553 17699 : path->total_cost = startup_cost + run_cost;
1554 17699 : }
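/*
 * Illustrative example for cost_functionscan, with a hypothetical rowcount:
 * because nodeFunctionscan.c runs the function(s) to completion up front,
 * a function estimated to return 1000 rows charges all of its evaluation
 * cost as startup cost; the run cost is then roughly cpu_tuple_cost * 1000
 * = 10.0 at default settings, plus any qual and tlist costs.
 */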
1555 :
1556 : /*
2223 alvherre 1557 ECB : * cost_tablefuncscan
1558 : * Determines and returns the cost of scanning a table function.
1559 : *
1560 : * 'baserel' is the relation to be scanned
1561 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1562 : */
1563 : void
2223 alvherre 1564 CBC 108 : cost_tablefuncscan(Path *path, PlannerInfo *root,
2223 alvherre 1565 ECB : RelOptInfo *baserel, ParamPathInfo *param_info)
1566 : {
2223 alvherre 1567 GIC 108 : Cost startup_cost = 0;
1568 108 : Cost run_cost = 0;
2223 alvherre 1569 ECB : QualCost qpqual_cost;
1570 : Cost cpu_per_tuple;
1571 : RangeTblEntry *rte;
1572 : QualCost exprcost;
1573 :
1574 : /* Should only be applied to base relations that are functions */
2223 alvherre 1575 GIC 108 : Assert(baserel->relid > 0);
1576 108 : rte = planner_rt_fetch(baserel->relid, root);
1577 108 : Assert(rte->rtekind == RTE_TABLEFUNC);
1578 :
1579 : /* Mark the path with the correct row estimate */
1580 108 : if (param_info)
1581 72 : path->rows = param_info->ppi_rows;
1582 : else
1583 36 : path->rows = baserel->rows;
2223 alvherre 1584 ECB :
1585 : /*
1586 : * Estimate costs of executing the table func expression(s).
1587 : *
1588 : * XXX in principle we ought to charge tuplestore spill costs if the
1589 : * number of rows is large. However, given how phony our rowcount
1590 : * estimates for tablefuncs tend to be, there's not a lot of point in that
1591 : * refinement right now.
1592 : */
2223 alvherre 1593 GIC 108 : cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1594 :
2223 alvherre 1595 CBC 108 : startup_cost += exprcost.startup + exprcost.per_tuple;
2223 alvherre 1596 ECB :
1597 : /* Add scanning CPU costs */
2223 alvherre 1598 GIC 108 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1599 :
2223 alvherre 1600 CBC 108 : startup_cost += qpqual_cost.startup;
1601 108 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
2223 alvherre 1602 GIC 108 : run_cost += cpu_per_tuple * baserel->tuples;
2223 alvherre 1603 ECB :
1604 : /* tlist eval costs are paid per output row, not per tuple scanned */
2223 alvherre 1605 GIC 108 : startup_cost += path->pathtarget->cost.startup;
1606 108 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1607 :
1608 108 : path->startup_cost = startup_cost;
1609 108 : path->total_cost = startup_cost + run_cost;
1610 108 : }
1611 :
1612 : /*
6094 mail 1613 ECB : * cost_valuesscan
1614 : * Determines and returns the cost of scanning a VALUES RTE.
3892 tgl 1615 : *
1616 : * 'baserel' is the relation to be scanned
1617 : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
6094 mail 1618 : */
1619 : void
3892 tgl 1620 CBC 3553 : cost_valuesscan(Path *path, PlannerInfo *root,
3892 tgl 1621 ECB : RelOptInfo *baserel, ParamPathInfo *param_info)
6094 mail 1622 : {
6094 mail 1623 GIC 3553 : Cost startup_cost = 0;
1624 3553 : Cost run_cost = 0;
3892 tgl 1625 ECB : QualCost qpqual_cost;
6094 mail 1626 : Cost cpu_per_tuple;
1627 :
1628 : /* Should only be applied to base relations that are values lists */
6094 mail 1629 CBC 3553 : Assert(baserel->relid > 0);
1630 3553 : Assert(baserel->rtekind == RTE_VALUES);
1631 :
1632 : /* Mark the path with the correct row estimate */
3892 tgl 1633 GIC 3553 : if (param_info)
1634 24 : path->rows = param_info->ppi_rows;
1635 : else
1636 3529 : path->rows = baserel->rows;
1637 :
1638 : /*
1639 : * For now, estimate list evaluation cost at one operator eval per list
6031 bruce 1640 ECB : * (probably pretty bogus, but is it worth being smarter?)
1641 : */
6094 mail 1642 GIC 3553 : cpu_per_tuple = cpu_operator_cost;
6094 mail 1643 ECB :
1644 : /* Add scanning CPU costs */
3892 tgl 1645 GIC 3553 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1646 :
1647 3553 : startup_cost += qpqual_cost.startup;
1648 3553 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
6094 mail 1649 CBC 3553 : run_cost += cpu_per_tuple * baserel->tuples;
6094 mail 1650 ECB :
1651 : /* tlist eval costs are paid per output row, not per tuple scanned */
2607 tgl 1652 GIC 3553 : startup_cost += path->pathtarget->cost.startup;
2607 tgl 1653 CBC 3553 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
2607 tgl 1654 ECB :
6094 mail 1655 GIC 3553 : path->startup_cost = startup_cost;
6094 mail 1656 CBC 3553 : path->total_cost = startup_cost + run_cost;
6094 mail 1657 GIC 3553 : }
1658 :
1659 : /*
1660 : * cost_ctescan
1661 : * Determines and returns the cost of scanning a CTE RTE.
5300 tgl 1662 ECB : *
1663 : * Note: this is used for both self-reference and regular CTEs; the
1664 : * possible cost differences are below the threshold of what we could
3260 bruce 1665 : * estimate accurately anyway. Note that the costs of evaluating the
1666 : * referenced CTE query are added into the final plan as initplan costs,
5300 tgl 1667 : * and should NOT be counted here.
1668 : */
1669 : void
3878 tgl 1670 GIC 1597 : cost_ctescan(Path *path, PlannerInfo *root,
1671 : RelOptInfo *baserel, ParamPathInfo *param_info)
5300 tgl 1672 ECB : {
5300 tgl 1673 CBC 1597 : Cost startup_cost = 0;
5300 tgl 1674 GIC 1597 : Cost run_cost = 0;
3878 tgl 1675 ECB : QualCost qpqual_cost;
5300 1676 : Cost cpu_per_tuple;
1677 :
1678 : /* Should only be applied to base relations that are CTEs */
5300 tgl 1679 GIC 1597 : Assert(baserel->relid > 0);
1680 1597 : Assert(baserel->rtekind == RTE_CTE);
1681 :
1682 : /* Mark the path with the correct row estimate */
3878 1683 1597 : if (param_info)
3878 tgl 1684 UIC 0 : path->rows = param_info->ppi_rows;
1685 : else
3878 tgl 1686 GIC 1597 : path->rows = baserel->rows;
1687 :
1688 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
5300 1689 1597 : cpu_per_tuple = cpu_tuple_cost;
5300 tgl 1690 ECB :
1691 : /* Add scanning CPU costs */
3878 tgl 1692 GIC 1597 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
3878 tgl 1693 ECB :
3878 tgl 1694 CBC 1597 : startup_cost += qpqual_cost.startup;
3878 tgl 1695 GIC 1597 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
5300 1696 1597 : run_cost += cpu_per_tuple * baserel->tuples;
1697 :
1698 : /* tlist eval costs are paid per output row, not per tuple scanned */
2607 tgl 1699 CBC 1597 : startup_cost += path->pathtarget->cost.startup;
1700 1597 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1701 :
5300 tgl 1702 GIC 1597 : path->startup_cost = startup_cost;
5300 tgl 1703 CBC 1597 : path->total_cost = startup_cost + run_cost;
5300 tgl 1704 GBC 1597 : }
1705 :
2200 kgrittn 1706 ECB : /*
1707 : * cost_namedtuplestorescan
1708 : * Determines and returns the cost of scanning a named tuplestore.
1709 : */
1710 : void
2200 kgrittn 1711 GIC 219 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
2200 kgrittn 1712 ECB : RelOptInfo *baserel, ParamPathInfo *param_info)
1713 : {
2200 kgrittn 1714 CBC 219 : Cost startup_cost = 0;
1715 219 : Cost run_cost = 0;
2200 kgrittn 1716 ECB : QualCost qpqual_cost;
1717 : Cost cpu_per_tuple;
1718 :
1719 : /* Should only be applied to base relations that are Tuplestores */
2200 kgrittn 1720 CBC 219 : Assert(baserel->relid > 0);
2200 kgrittn 1721 GIC 219 : Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
2200 kgrittn 1722 ECB :
1723 : /* Mark the path with the correct row estimate */
2200 kgrittn 1724 CBC 219 : if (param_info)
2200 kgrittn 1725 UIC 0 : path->rows = param_info->ppi_rows;
1726 : else
2200 kgrittn 1727 GIC 219 : path->rows = baserel->rows;
1728 :
1729 : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1730 219 : cpu_per_tuple = cpu_tuple_cost;
2200 kgrittn 1731 ECB :
1732 : /* Add scanning CPU costs */
2200 kgrittn 1733 GIC 219 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
2200 kgrittn 1734 ECB :
2200 kgrittn 1735 CBC 219 : startup_cost += qpqual_cost.startup;
2200 kgrittn 1736 GIC 219 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1737 219 : run_cost += cpu_per_tuple * baserel->tuples;
1738 :
1739 219 : path->startup_cost = startup_cost;
2200 kgrittn 1740 CBC 219 : path->total_cost = startup_cost + run_cost;
1741 219 : }
1742 :
1743 : /*
1532 tgl 1744 ECB : * cost_resultscan
1532 tgl 1745 EUB : * Determines and returns the cost of scanning an RTE_RESULT relation.
1746 : */
1532 tgl 1747 ECB : void
1532 tgl 1748 GIC 685 : cost_resultscan(Path *path, PlannerInfo *root,
1749 : RelOptInfo *baserel, ParamPathInfo *param_info)
1532 tgl 1750 ECB : {
1532 tgl 1751 GIC 685 : Cost startup_cost = 0;
1752 685 : Cost run_cost = 0;
1532 tgl 1753 ECB : QualCost qpqual_cost;
1754 : Cost cpu_per_tuple;
1755 :
1756 : /* Should only be applied to RTE_RESULT base relations */
1532 tgl 1757 CBC 685 : Assert(baserel->relid > 0);
1532 tgl 1758 GIC 685 : Assert(baserel->rtekind == RTE_RESULT);
1532 tgl 1759 ECB :
1760 : /* Mark the path with the correct row estimate */
1532 tgl 1761 CBC 685 : if (param_info)
1532 tgl 1762 GIC 63 : path->rows = param_info->ppi_rows;
1763 : else
1764 622 : path->rows = baserel->rows;
1765 :
1766 : /* We charge qual cost plus cpu_tuple_cost */
1767 685 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1532 tgl 1768 ECB :
1532 tgl 1769 GIC 685 : startup_cost += qpqual_cost.startup;
1770 685 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1532 tgl 1771 CBC 685 : run_cost += cpu_per_tuple * baserel->tuples;
1532 tgl 1772 ECB :
1532 tgl 1773 GIC 685 : path->startup_cost = startup_cost;
1774 685 : path->total_cost = startup_cost + run_cost;
1775 685 : }
1776 :
5300 tgl 1777 ECB : /*
1778 : * cost_recursive_union
1779 : * Determines and returns the cost of performing a recursive union,
1780 : * and also the estimated output size.
1781 : *
2589 1782 : * We are given Paths for the nonrecursive and recursive terms.
1783 : */
5300 1784 : void
2589 tgl 1785 GIC 354 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1786 : {
5300 tgl 1787 ECB : Cost startup_cost;
1788 : Cost total_cost;
1789 : double total_rows;
1790 :
1791 : /* We probably have decent estimates for the non-recursive term */
5300 tgl 1792 GIC 354 : startup_cost = nrterm->startup_cost;
5300 tgl 1793 CBC 354 : total_cost = nrterm->total_cost;
2589 1794 354 : total_rows = nrterm->rows;
5300 tgl 1795 ECB :
1796 : /*
1797 : * We arbitrarily assume that about 10 recursive iterations will be
1798 : * needed, and that we've managed to get a good fix on the cost and output
1799 : * size of each one of them. These are mighty shaky assumptions but it's
1800 : * hard to see how to do better.
1801 : */
5300 tgl 1802 GIC 354 : total_cost += 10 * rterm->total_cost;
2589 1803 354 : total_rows += 10 * rterm->rows;
1804 :
5300 tgl 1805 ECB : /*
1806 : * Also charge cpu_tuple_cost per row to account for the costs of
1807 : * manipulating the tuplestores. (We don't worry about possible
1808 : * spill-to-disk costs.)
1809 : */
5300 tgl 1810 GIC 354 : total_cost += cpu_tuple_cost * total_rows;
1811 :
5300 tgl 1812 CBC 354 : runion->startup_cost = startup_cost;
1813 354 : runion->total_cost = total_cost;
2589 1814 354 : runion->rows = total_rows;
2589 tgl 1815 GIC 354 : runion->pathtarget->width = Max(nrterm->pathtarget->width,
1816 : rterm->pathtarget->width);
5300 1817 354 : }
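/*
 * Worked example with hypothetical inputs: if the nonrecursive term is
 * estimated at 100 rows and cost 10, and the recursive term at 20 rows and
 * cost 5 per iteration, the ten-iteration assumption above gives
 * total_rows = 100 + 10 * 20 = 300 and total_cost = 10 + 10 * 5 +
 * cpu_tuple_cost * 300 = 63 at the default cpu_tuple_cost of 0.01.
 */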
1818 :
1819 : /*
1820 : * cost_tuplesort
1821 : * Determines and returns the cost of sorting a relation using tuplesort,
1098 tomas.vondra 1822 ECB : * not including the cost of reading the input data.
8454 tgl 1823 : *
1824 : * If the total volume of data to sort is less than sort_mem, we will do
1825 : * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1826 : * comparisons for t tuples.
1827 : *
1828 : * If the total volume exceeds sort_mem, we switch to a tape-style merge
1829 : * algorithm. There will still be about t*log2(t) tuple comparisons in
8491 1830 : * total, but we will also need to write and read each tuple once per
1831 : * merge pass. We expect about ceil(logM(r)) merge passes where r is the
6258 1832 : * number of initial runs formed and M is the merge order used by tuplesort.c.
2557 rhaas 1833 : * Since the average initial run should be about sort_mem, we have
1834 : * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
188 tgl 1835 : * cpu = comparison_cost * t * log2(t)
1836 : *
5819 1837 : * If the sort is bounded (i.e., only the first k result tuples are needed)
1838 : * and k tuples can fit into sort_mem, we use a heap method that keeps only
1839 : * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1840 : *
1841 : * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1842 : * accesses (XXX can't we refine that guess?)
1843 : *
1844 : * By default, we charge two operator evals per tuple comparison, which should
1845 : * be in the right ballpark in most cases. The caller can tweak this by
1846 : * specifying nonzero comparison_cost; typically that's used for any extra
1847 : * work that has to be done to prepare the inputs to the comparison operators.
1848 : *
1849 : * 'tuples' is the number of tuples in the relation
1850 : * 'width' is the average tuple width in bytes
1851 : * 'comparison_cost' is the extra cost per comparison, if any
1852 : * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1853 : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1854 : */
1855 : static void
188 tgl 1856 GIC 586586 : cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1857 : double tuples, int width,
1858 : Cost comparison_cost, int sort_mem,
1859 : double limit_tuples)
1860 : {
5819 1861 586586 : double input_bytes = relation_byte_size(tuples, width);
1862 : double output_bytes;
1863 : double output_tuples;
4567 1864 586586 : long sort_mem_bytes = sort_mem * 1024L;
1865 :
1866 : /*
1867 : * We want to be sure the cost of a sort is never estimated as zero, even
1868 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1869 : */
8491 1870 586586 : if (tuples < 2.0)
1871 181484 : tuples = 2.0;
1872 :
1873 : /* Include the default cost-per-comparison */
188 1874 586586 : comparison_cost += 2.0 * cpu_operator_cost;
1875 :
5819 tgl 1876 ECB : /* Do we have a useful LIMIT? */
5819 tgl 1877 GIC 586586 : if (limit_tuples > 0 && limit_tuples < tuples)
1878 : {
1879 1080 : output_tuples = limit_tuples;
1880 1080 : output_bytes = relation_byte_size(output_tuples, width);
5819 tgl 1881 ECB : }
1882 : else
1883 : {
5819 tgl 1884 CBC 585506 : output_tuples = tuples;
5819 tgl 1885 GIC 585506 : output_bytes = input_bytes;
1886 : }
1887 :
4567 1888 586586 : if (output_bytes > sort_mem_bytes)
1889 : {
5819 tgl 1890 ECB : /*
1891 : * We'll have to use a disk-based sort of all the tuples
1892 : */
5819 tgl 1893 GIC 7679 : double npages = ceil(input_bytes / BLCKSZ);
2557 rhaas 1894 CBC 7679 : double nruns = input_bytes / sort_mem_bytes;
4567 tgl 1895 GIC 7679 : double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1896 : double log_runs;
8454 tgl 1897 ECB : double npageaccesses;
1898 :
188 1899 : /*
1900 : * CPU costs
1901 : *
1902 : * Assume about N log2 N comparisons
1903 : */
188 tgl 1904 CBC 7679 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
5819 tgl 1905 ECB :
1906 : /* Disk costs */
1907 :
6258 1908 : /* Compute logM(r) as log(r) / log(M) */
6258 tgl 1909 GIC 7679 : if (nruns > mergeorder)
1910 2386 : log_runs = ceil(log(nruns) / log(mergeorder));
1911 : else
8491 1912 5293 : log_runs = 1.0;
8454 tgl 1913 CBC 7679 : npageaccesses = 2.0 * npages * log_runs;
6152 tgl 1914 ECB : /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1098 tomas.vondra 1915 CBC 7679 : *startup_cost += npageaccesses *
6152 tgl 1916 GIC 7679 : (seq_page_cost * 0.75 + random_page_cost * 0.25);
1917 : }
4567 1918 578907 : else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1919 : {
1920 : /*
1921 : * We'll use a bounded heap-sort keeping just K tuples in memory, for
1922 : * a total number of tuple comparisons of N log2 K; but the constant
1923 : * factor is a bit higher than for quicksort. Tweak it so that the
188 tgl 1924 ECB : * cost curve is continuous at the crossover point.
1925 : */
188 tgl 1926 GIC 616 : *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
1927 : }
1928 : else
5819 tgl 1929 ECB : {
1930 : /* We'll use plain quicksort on all the input tuples */
188 tgl 1931 GIC 578291 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
5819 tgl 1932 ECB : }
8745 1933 :
1934 : /*
6385 bruce 1935 : * Also charge a small amount (arbitrarily set equal to operator cost) per
4797 tgl 1936 : * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1937 : * doesn't do qual-checking or projection, so it has less overhead than
1938 : * most plan nodes. Note it's correct to use tuples not output_tuples
1939 : * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1940 : * counting the LIMIT otherwise.
1941 : */
1098 tomas.vondra 1942 GIC 586586 : *run_cost = cpu_operator_cost * tuples;
1943 586586 : }
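/*
 * Worked example for the in-memory quicksort branch, with hypothetical
 * inputs at the default cpu_operator_cost of 0.0025 (so comparison_cost =
 * 0.005): sorting 10000 tuples that fit in sort_mem gives startup_cost =
 * 0.005 * 10000 * log2(10000) with log2(10000) ~ 13.29, i.e. about 664,
 * and run_cost = 0.0025 * 10000 = 25.  If the same input exceeded
 * sort_mem, the disk branch above would instead add 2 * npages *
 * ceil(logM(nruns)) page accesses, each costed at seq_page_cost * 0.75 +
 * random_page_cost * 0.25.
 */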
1944 :
1945 : /*
1098 tomas.vondra 1946 ECB : * cost_incremental_sort
1947 : * Determines and returns the cost of sorting a relation incrementally, when
1948 : * the input path is presorted by a prefix of the pathkeys.
1949 : *
1950 : * 'presorted_keys' is the number of leading pathkeys by which the input path
1951 : * is sorted.
1952 : *
1953 : * We estimate the number of groups into which the relation is divided by the
1954 : * leading pathkeys, and then calculate the cost of sorting a single group
1955 : * with tuplesort using cost_tuplesort().
1956 : */
1957 : void
1098 tomas.vondra 1958 GIC 2295 : cost_incremental_sort(Path *path,
1959 : PlannerInfo *root, List *pathkeys, int presorted_keys,
1960 : Cost input_startup_cost, Cost input_total_cost,
1961 : double input_tuples, int width, Cost comparison_cost, int sort_mem,
1098 tomas.vondra 1962 ECB : double limit_tuples)
1963 : {
1964 : Cost startup_cost,
1965 : run_cost,
1098 tomas.vondra 1966 GIC 2295 : input_run_cost = input_total_cost - input_startup_cost;
1967 : double group_tuples,
1968 : input_groups;
1969 : Cost group_startup_cost,
1970 : group_run_cost,
1971 : group_input_run_cost;
1972 2295 : List *presortedExprs = NIL;
1973 : ListCell *l;
1081 1974 2295 : bool unknown_varno = false;
1975 :
114 drowley 1976 GNC 2295 : Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
1098 tomas.vondra 1977 ECB :
1978 : /*
1979 : * We want to be sure the cost of a sort is never estimated as zero, even
1980 : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1981 : */
1098 tomas.vondra 1982 GIC 2295 : if (input_tuples < 2.0)
1983 1029 : input_tuples = 2.0;
1984 :
1081 tomas.vondra 1985 ECB : /* Default estimate of number of groups, capped to one group per row. */
1081 tomas.vondra 1986 GIC 2295 : input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
1987 :
1988 : /*
1989 : * Extract presorted keys as list of expressions.
1990 : *
1060 tgl 1991 ECB : * We need to be careful about Vars containing "varno 0" which might have
1992 : * been introduced by generate_append_tlist, which would confuse
1081 tomas.vondra 1993 : * estimate_num_groups (in fact it'd fail for such expressions). See
1994 : * recurse_set_operations which has to deal with the same issue.
1995 : *
1996 : * Unlike recurse_set_operations we can't access the original target list
1997 : * here, and even if we could it's not very clear how useful that would be
1998 : * for a set operation combining multiple tables. So we simply detect if
1999 : * there are any expressions with "varno 0" and use the default
2000 : * DEFAULT_NUM_DISTINCT in that case.
2001 : *
1060 tgl 2002 : * We might also use either 1.0 (a single group) or input_tuples (each row
2003 : * being a separate group), pretty much the worst and best case for
2004 : * incremental sort. But those are extreme cases and using something in
1081 tomas.vondra 2005 : * between seems reasonable. Furthermore, generate_append_tlist is used
2006 : * for set operations, which are likely to produce mostly unique output
2007 : * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
2008 : * while maintaining lower startup cost.
2009 : */
1098 tomas.vondra 2010 GIC 2322 : foreach(l, pathkeys)
2011 : {
2012 2322 : PathKey *key = (PathKey *) lfirst(l);
2013 2322 : EquivalenceMember *member = (EquivalenceMember *)
2014 2322 : linitial(key->pk_eclass->ec_members);
2015 :
2016 : /*
2017 : * Check if the expression contains Var with "varno 0" so that we
2018 : * don't call estimate_num_groups in that case.
2019 : */
808 tgl 2020 2322 : if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2021 : {
1081 tomas.vondra 2022 3 : unknown_varno = true;
2023 3 : break;
2024 : }
2025 :
2026 : /* expression not containing any Vars with "varno 0" */
1098 2027 2319 : presortedExprs = lappend(presortedExprs, member->em_expr);
2028 :
114 drowley 2029 GNC 2319 : if (foreach_current_index(l) + 1 >= presorted_keys)
1098 tomas.vondra 2030 CBC 2292 : break;
1098 tomas.vondra 2031 ECB : }
2032 :
2033 : /* Estimate the number of groups with equal presorted keys. */
1081 tomas.vondra 2034 GIC 2295 : if (!unknown_varno)
740 drowley 2035 2292 : input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
2036 : NULL, NULL);
2037 :
1098 tomas.vondra 2038 CBC 2295 : group_tuples = input_tuples / input_groups;
1098 tomas.vondra 2039 GIC 2295 : group_input_run_cost = input_run_cost / input_groups;
1098 tomas.vondra 2040 ECB :
2041 : /*
2042 : * Estimate the average cost of sorting of one group where presorted keys
2043 : * are equal.
2044 : */
188 tgl 2045 CBC 2295 : cost_tuplesort(&group_startup_cost, &group_run_cost,
2046 : group_tuples, width, comparison_cost, sort_mem,
2047 : limit_tuples);
2048 :
1098 tomas.vondra 2049 ECB : /*
2050 : * Startup cost of incremental sort is the startup cost of its first group
2051 : * plus the cost of its input.
2052 : */
114 drowley 2053 GNC 2295 : startup_cost = group_startup_cost + input_startup_cost +
2054 : group_input_run_cost;
2055 :
2056 : /*
2057 : * After we started producing tuples from the first group, the cost of
2058 : * producing all the tuples is given by the cost to finish processing this
2059 : * group, plus the total cost to process the remaining groups, plus the
1098 tomas.vondra 2060 ECB : * remaining cost of input.
2061 : */
114 drowley 2062 GNC 2295 : run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
2063 2295 : (input_groups - 1) + group_input_run_cost * (input_groups - 1);
2064 :
2065 : /*
2066 : * Incremental sort adds some overhead by itself. Firstly, it has to
1098 tomas.vondra 2067 ECB : * detect the sort groups. This is roughly equal to one extra copy and
2068 : * comparison per tuple.
2069 : */
1098 tomas.vondra 2070 GIC 2295 : run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2071 :
2072 : /*
2073 : * Additionally, we charge double cpu_tuple_cost for each input group to
2074 : * account for the tuplesort_reset that's performed after each group.
2075 : */
2076 2295 : run_cost += 2.0 * cpu_tuple_cost * input_groups;
2077 :
2078 2295 : path->rows = input_tuples;
2079 2295 : path->startup_cost = startup_cost;
1098 tomas.vondra 2080 CBC 2295 : path->total_cost = startup_cost + run_cost;
2081 2295 : }
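/*
 * Worked example with hypothetical numbers: given 10000 input tuples
 * falling into input_groups = 100 groups, each tuplesort handles only
 * group_tuples = 100 tuples, so the startup cost pays for a single
 * 100-tuple sort (about 0.005 * 100 * log2(100) = 3.3 at default costs)
 * plus the input's startup cost, rather than for a full 10000-tuple sort;
 * the remaining 99 group sorts, the per-tuple group-detection charge, and
 * the per-group tuplesort_reset charges all land in run_cost.
 */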
2082 :
2083 : /*
2084 : * cost_sort
2085 : * Determines and returns the cost of sorting a relation, including
2086 : * the cost of reading the input data.
2087 : *
1098 tomas.vondra 2088 ECB : * NOTE: some callers currently pass NIL for pathkeys because they
2089 : * can't conveniently supply the sort keys. Since this routine doesn't
2090 : * currently do anything with pathkeys anyway, that doesn't matter...
2091 : * but if it ever does, it should react gracefully to lack of key data.
2092 : * (Actually, the thing we'd most likely be interested in is just the number
2093 : * of sort keys, which all callers *could* supply.)
2094 : */
2095 : void
1098 tomas.vondra 2096 CBC 584291 : cost_sort(Path *path, PlannerInfo *root,
1098 tomas.vondra 2097 ECB : List *pathkeys, Cost input_cost, double tuples, int width,
2098 : Cost comparison_cost, int sort_mem,
2099 : double limit_tuples)
2100 :
2101 : {
2102 : Cost startup_cost;
2103 : Cost run_cost;
2104 :
188 tgl 2105 GIC 584291 : cost_tuplesort(&startup_cost, &run_cost,
2106 : tuples, width,
2107 : comparison_cost, sort_mem,
2108 : limit_tuples);
2109 :
1098 tomas.vondra 2110 584291 : if (!enable_sort)
2111 589 : startup_cost += disable_cost;
2112 :
2113 584291 : startup_cost += input_cost;
1098 tomas.vondra 2114 ECB :
1098 tomas.vondra 2115 GIC 584291 : path->rows = tuples;
8454 tgl 2116 584291 : path->startup_cost = startup_cost;
2117 584291 : path->total_cost = startup_cost + run_cost;
9770 scrappy 2118 584291 : }
2119 :
2120 : /*
2121 : * append_nonpartial_cost
2122 : * Estimate the cost of the non-partial paths in a Parallel Append.
1951 rhaas 2123 ECB : * The non-partial paths are assumed to be the first "numpaths" paths
2124 : * from the subpaths list, and to be in order of decreasing cost.
2125 : */
2126 : static Cost
1951 rhaas 2127 GIC 6833 : append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
1951 rhaas 2128 ECB : {
2129 : Cost *costarr;
2130 : int arrlen;
2131 : ListCell *l;
2132 : ListCell *cell;
2133 : int path_index;
2134 : int min_index;
2135 : int max_index;
2136 :
1951 rhaas 2137 GIC 6833 : if (numpaths == 0)
2138 6333 : return 0;
2139 :
2140 : /*
2141 : * Array length is number of workers or number of relevant paths,
2142 : * whichever is less.
2143 : */
1951 rhaas 2144 CBC 500 : arrlen = Min(parallel_workers, numpaths);
1951 rhaas 2145 GIC 500 : costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
2146 :
2147 : /* The first few paths will each be claimed by a different worker. */
2148 500 : path_index = 0;
2149 1283 : foreach(cell, subpaths)
2150 : {
2151 1188 : Path *subpath = (Path *) lfirst(cell);
2152 :
2153 1188 : if (path_index == arrlen)
1951 rhaas 2154 CBC 405 : break;
2155 783 : costarr[path_index++] = subpath->total_cost;
2156 : }
2157 :
2158 : /*
2159 : * Since subpaths are sorted by decreasing cost, the last one will have
2160 : * the minimum cost.
1951 rhaas 2161 ECB : */
1951 rhaas 2162 CBC 500 : min_index = arrlen - 1;
2163 :
2164 : /*
1951 rhaas 2165 ECB : * For each of the remaining subpaths, add its cost to the array element
2166 : * with minimum cost.
2167 : */
1364 tgl 2168 CBC 741 : for_each_cell(l, subpaths, cell)
2169 : {
1951 rhaas 2170 497 : Path *subpath = (Path *) lfirst(l);
1951 rhaas 2171 ECB :
2172 : /* Consider only the non-partial paths */
1951 rhaas 2173 GIC 497 : if (path_index++ == numpaths)
2174 256 : break;
2175 :
2176 241 : costarr[min_index] += subpath->total_cost;
2177 :
1951 rhaas 2178 ECB : /* Update the new min cost array index */
228 drowley 2179 GNC 241 : min_index = 0;
2180 741 : for (int i = 0; i < arrlen; i++)
2181 : {
1951 rhaas 2182 GIC 500 : if (costarr[i] < costarr[min_index])
2183 101 : min_index = i;
2184 : }
1951 rhaas 2185 ECB : }
2186 :
2187 : /* Return the highest cost from the array */
228 drowley 2188 GNC 500 : max_index = 0;
2189 1283 : for (int i = 0; i < arrlen; i++)
2190 : {
1951 rhaas 2191 CBC 783 : if (costarr[i] > costarr[max_index])
2192 95 : max_index = i;
2193 : }
1951 rhaas 2194 ECB :
1951 rhaas 2195 GIC 500 : return costarr[max_index];
2196 : }
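/*
 * Worked example of the assignment scheme above, with hypothetical costs:
 * given non-partial subpaths costing {10, 8, 6, 4, 2} and 3 workers, the
 * first three paths seed costarr = [10, 8, 6]; the path costing 4 goes to
 * the cheapest slot ([10, 8, 10]) and the path costing 2 to the new
 * cheapest ([10, 10, 10]), so the function returns 10.
 */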
1951 rhaas 2197 ECB :
2198 : /*
2199 : * cost_append
2200 : * Determines and returns the cost of an Append node.
2201 : */
2202 : void
188 tgl 2203 GIC 21287 : cost_append(AppendPath *apath)
2204 : {
2205 : ListCell *l;
1951 rhaas 2206 ECB :
1951 rhaas 2207 CBC 21287 : apath->path.startup_cost = 0;
1951 rhaas 2208 GIC 21287 : apath->path.total_cost = 0;
1465 tgl 2209 CBC 21287 : apath->path.rows = 0;
1951 rhaas 2210 ECB :
1951 rhaas 2211 GIC 21287 : if (apath->subpaths == NIL)
2212 672 : return;
1951 rhaas 2213 ECB :
1951 rhaas 2214 GIC 20615 : if (!apath->path.parallel_aware)
2215 : {
1465 tgl 2216 13782 : List *pathkeys = apath->path.pathkeys;
2217 :
2218 13782 : if (pathkeys == NIL)
2219 : {
186 drowley 2220 GNC 12815 : Path *firstsubpath = (Path *) linitial(apath->subpaths);
1465 tgl 2221 ECB :
2222 : /*
2223 : * For an unordered, non-parallel-aware Append we take the startup
2224 : * cost as the startup cost of the first subpath.
2225 : */
186 drowley 2226 GNC 12815 : apath->path.startup_cost = firstsubpath->startup_cost;
1951 rhaas 2227 ECB :
2228 : /* Compute rows and costs as sums of subplan rows and costs. */
1465 tgl 2229 CBC 50545 : foreach(l, apath->subpaths)
1465 tgl 2230 ECB : {
1465 tgl 2231 GIC 37730 : Path *subpath = (Path *) lfirst(l);
1465 tgl 2232 ECB :
1465 tgl 2233 GIC 37730 : apath->path.rows += subpath->rows;
1465 tgl 2234 CBC 37730 : apath->path.total_cost += subpath->total_cost;
2235 : }
1465 tgl 2236 ECB : }
2237 : else
2238 : {
2239 : /*
2240 : * For an ordered, non-parallel-aware Append we take the startup
2241 : * cost as the sum of the subpath startup costs. This ensures
2242 : * that we don't underestimate the startup cost when a query's
2243 : * LIMIT is such that several of the children have to be run to
2244 : * satisfy it. This might be overkill --- another plausible hack
2245 : * would be to take the Append's startup cost as the maximum of
2246 : * the child startup costs. But we don't want to risk believing
2247 : * that an ORDER BY LIMIT query can be satisfied at small cost
2248 : * when the first child has small startup cost but later ones
2249 : * don't. (If we had the ability to deal with nonlinear cost
2250 : * interpolation for partial retrievals, we would not need to be
2251 : * so conservative about this.)
2252 : *
2253 : * This case is also different from the above in that we have to
2254 : * account for possibly injecting sorts into subpaths that aren't
2255 : * natively ordered.
2256 : */
1465 tgl 2257 GIC 3774 : foreach(l, apath->subpaths)
2258 : {
2259 2807 : Path *subpath = (Path *) lfirst(l);
2260 : Path sort_path; /* dummy for result of cost_sort */
2261 :
2262 2807 : if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
2263 : {
2264 : /*
2265 : * We'll need to insert a Sort node, so include costs for
2266 : * that. We can use the parent's LIMIT if any, since we
2267 : * certainly won't pull more than that many tuples from
2268 : * any child.
2269 : */
2270 22 : cost_sort(&sort_path,
2271 : NULL, /* doesn't currently need root */
2272 : pathkeys,
2273 : subpath->total_cost,
2274 : subpath->rows,
1465 tgl 2275 CBC 22 : subpath->pathtarget->width,
2276 : 0.0,
1465 tgl 2277 ECB : work_mem,
2278 : apath->limit_tuples);
1465 tgl 2279 GIC 22 : subpath = &sort_path;
1465 tgl 2280 ECB : }
2281 :
1465 tgl 2282 GIC 2807 : apath->path.rows += subpath->rows;
2283 2807 : apath->path.startup_cost += subpath->startup_cost;
2284 2807 : apath->path.total_cost += subpath->total_cost;
2285 : }
2286 : }
2287 : }
1951 rhaas 2288 ECB : else /* parallel-aware */
2289 : {
1951 rhaas 2290 GIC 6833 : int i = 0;
2291 6833 : double parallel_divisor = get_parallel_divisor(&apath->path);
2292 :
1465 tgl 2293 ECB : /* Parallel-aware Append never produces ordered output. */
1465 tgl 2294 GIC 6833 : Assert(apath->path.pathkeys == NIL);
2295 :
2296 : /* Calculate startup cost. */
1951 rhaas 2297 CBC 28323 : foreach(l, apath->subpaths)
2298 : {
1951 rhaas 2299 GIC 21490 : Path *subpath = (Path *) lfirst(l);
1951 rhaas 2300 ECB :
2301 : /*
2302 : * Append will start returning tuples when the child node having
2303 : * lowest startup cost is done setting up. We consider only the
2304 : * first few subplans that immediately get a worker assigned.
2305 : */
1951 rhaas 2306 GIC 21490 : if (i == 0)
2307 6833 : apath->path.startup_cost = subpath->startup_cost;
1951 rhaas 2308 CBC 14657 : else if (i < apath->path.parallel_workers)
2309 6680 : apath->path.startup_cost = Min(apath->path.startup_cost,
2310 : subpath->startup_cost);
2311 :
1951 rhaas 2312 ECB : /*
2313 : * Apply parallel divisor to subpaths. Scale the number of rows
2314 : * for each partial subpath based on the ratio of the parallel
1921 2315 : * divisor originally used for the subpath to the one we adopted.
2316 : * Also add the cost of partial paths to the total cost, but
2317 : * ignore non-partial paths for now.
2318 : */
1951 rhaas 2319 GIC 21490 : if (i < apath->first_partial_path)
2320 1024 : apath->path.rows += subpath->rows / parallel_divisor;
2321 : else
2322 : {
2323 : double subpath_parallel_divisor;
1921 rhaas 2324 ECB :
1921 rhaas 2325 CBC 20466 : subpath_parallel_divisor = get_parallel_divisor(subpath);
2326 20466 : apath->path.rows += subpath->rows * (subpath_parallel_divisor /
1921 rhaas 2327 ECB : parallel_divisor);
1951 rhaas 2328 GIC 20466 : apath->path.total_cost += subpath->total_cost;
2329 : }
2330 :
1921 2331 21490 : apath->path.rows = clamp_row_est(apath->path.rows);
2332 :
1951 2333 21490 : i++;
2334 : }
2335 :
2336 : /* Add cost for non-partial subpaths. */
1951 rhaas 2337 CBC 6833 : apath->path.total_cost +=
2338 6833 : append_nonpartial_cost(apath->subpaths,
2339 : apath->first_partial_path,
2340 : apath->path.parallel_workers);
2341 : }
2342 :
1873 rhaas 2343 ECB : /*
2344 : * Although Append does not do any selection or projection, it's not free;
2345 : * add a small per-tuple overhead.
2346 : */
1873 rhaas 2347 GIC 20615 : apath->path.total_cost +=
2348 20615 : cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
1951 rhaas 2349 ECB : }
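/*
 * Illustrative example of the startup-cost rules above, with made-up
 * numbers: for three children that each have startup cost 5, an unordered
 * non-parallel-aware Append starts up at cost 5 (the first child's), while
 * an ordered Append starts up at 5 + 5 + 5 = 15, since every child may
 * have to be started to satisfy an ORDER BY LIMIT query.
 */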
2350 :
4560 tgl 2351 : /*
2352 : * cost_merge_append
2353 : * Determines and returns the cost of a MergeAppend node.
2354 : *
2355 : * MergeAppend merges several pre-sorted input streams, using a heap that
3260 bruce 2356 : * at any given instant holds the next tuple from each stream. If there
2357 : * are N streams, we need about N*log2(N) tuple comparisons to construct
2358 : * the heap at startup, and then for each output tuple, about log2(N)
2359 : * comparisons to replace the top entry.
2360 : *
2361 : * (The effective value of N will drop once some of the input streams are
2362 : * exhausted, but it seems unlikely to be worth trying to account for that.)
2363 : *
2364 : * The heap is never spilled to disk, since we assume N is not very large.
4560 tgl 2365 : * So this is much simpler than cost_sort.
2366 : *
2367 : * As in cost_sort, we charge two operator evals per tuple comparison.
2368 : *
2369 : * 'pathkeys' is a list of sort keys
2370 : * 'n_streams' is the number of input streams
2371 : * 'input_startup_cost' is the sum of the input streams' startup costs
2372 : * 'input_total_cost' is the sum of the input streams' total costs
2373 : * 'tuples' is the number of tuples in all the streams
2374 : */
2375 : void
4560 tgl 2376 GIC 1785 : cost_merge_append(Path *path, PlannerInfo *root,
2377 : List *pathkeys, int n_streams,
2378 : Cost input_startup_cost, Cost input_total_cost,
2379 : double tuples)
2380 : {
2381 1785 : Cost startup_cost = 0;
2382 1785 : Cost run_cost = 0;
2383 : Cost comparison_cost;
2384 : double N;
2385 : double logN;
2386 :
2387 : /*
2388 : * Avoid log(0)...
2389 : */
2390 1785 : N = (n_streams < 2) ? 2.0 : (double) n_streams;
2391 1785 : logN = LOG2(N);
2392 :
2393 : /* Assumed cost per tuple comparison */
4560 tgl 2394 CBC 1785 : comparison_cost = 2.0 * cpu_operator_cost;
2395 :
2396 : /* Heap creation cost */
4560 tgl 2397 GIC 1785 : startup_cost += comparison_cost * N * logN;
2398 :
4560 tgl 2399 ECB : /* Per-tuple heap maintenance cost */
2346 tgl 2400 CBC 1785 : run_cost += tuples * comparison_cost * logN;
2401 :
2402 : /*
2403 : * Although MergeAppend does not do any selection or projection, it's not
2404 : * free; add a small per-tuple overhead.
2405 : */
1873 rhaas 2406 GIC 1785 : run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2407 :
4560 tgl 2408 CBC 1785 : path->startup_cost = startup_cost + input_startup_cost;
2409 1785 : path->total_cost = startup_cost + run_cost + input_total_cost;
4560 tgl 2410 GIC 1785 : }
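/*
 * Worked example at default costs (comparison_cost = 2 * 0.0025 = 0.005),
 * with hypothetical inputs: merging N = 4 streams holding 1000 tuples in
 * total charges 0.005 * 4 * log2(4) = 0.04 startup for heap creation and
 * 1000 * 0.005 * log2(4) = 10.0 run cost for heap maintenance, plus the
 * per-tuple overhead of cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER *
 * 1000 = 5.0 with the multiplier at its 0.5 value.
 */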
2411 :
7435 tgl 2412 ECB : /*
2413 : * cost_material
2414 : * Determines and returns the cost of materializing a relation, including
2415 : * the cost of reading the input data.
2416 : *
2417 : * If the total volume of data to materialize exceeds work_mem, we will need
2418 : * to write it to disk, so the cost is much higher in that case.
2419 : *
2420 : * Note that here we are estimating the costs for the first scan of the
2421 : * relation, so the materialization is all overhead --- any savings will
2422 : * occur only on rescan, which is estimated in cost_rescan.
2423 : */
2424 : void
7435 tgl 2425 GIC 177675 : cost_material(Path *path,
4957 tgl 2426 ECB : Cost input_startup_cost, Cost input_total_cost,
2427 : double tuples, int width)
7435 2428 : {
4957 tgl 2429 GIC 177675 : Cost startup_cost = input_startup_cost;
2430 177675 : Cost run_cost = input_total_cost - input_startup_cost;
7435 2431 177675 : double nbytes = relation_byte_size(tuples, width);
7005 2432 177675 : long work_mem_bytes = work_mem * 1024L;
2433 :
4090 2434 177675 : path->rows = tuples;
2435 :
2436 : /*
2437 : * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2438 : * reflect bookkeeping overhead. (This rate must be more than what
2439 : * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2440 : * if it is exactly the same then there will be a cost tie between
2441 : * nestloop with A outer, materialized B inner and nestloop with B outer,
2442 : * materialized A inner. The extra cost ensures we'll prefer
4790 bruce 2443 ECB : * materializing the smaller rel.) Note that this is normally a good deal
2444 : * less than cpu_tuple_cost, which is OK because a Material plan node
2445 : * doesn't do qual-checking or projection, so it's got less overhead than
2446 : * most plan nodes.
4957 tgl 2447 : */
4797 tgl 2448 CBC 177675 : run_cost += 2 * cpu_operator_cost * tuples;
4957 tgl 2449 ECB :
2450 : /*
2451 : * If we will spill to disk, charge at the rate of seq_page_cost per page.
2452 : * This cost is assumed to be evenly spread through the plan run phase,
2453 : * which isn't exactly accurate but our cost model doesn't allow for
2454 : * nonuniform costs within the run phase.
2455 : */
7005 tgl 2456 GIC 177675 : if (nbytes > work_mem_bytes)
2457 : {
7435 2458 1967 : double npages = ceil(nbytes / BLCKSZ);
2459 :
6152 2460 1967 : run_cost += seq_page_cost * npages;
2461 : }
2462 :
7435 2463 177675 : path->startup_cost = startup_cost;
2464 177675 : path->total_cost = startup_cost + run_cost;
2465 177675 : }
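/*
 * Illustrative example for cost_material, with hypothetical numbers:
 * materializing 100000 tuples adds bookkeeping of 2 * 0.0025 * 100000 =
 * 500 to run_cost; if those tuples occupy about 6.4MB (relation_byte_size
 * includes per-tuple header overhead) while work_mem is 4MB, the spill
 * branch above also charges seq_page_cost for each of roughly
 * ceil(6.4e6 / 8192) = 782 pages.
 */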
7435 tgl 2466 ECB :
2467 : /*
2468 : * cost_memoize_rescan
2469 : * Determines the estimated cost of rescanning a Memoize node.
2470 : *
2471 : * In order to estimate this, we must gain knowledge of how often we expect to
2472 : * be called and how many distinct sets of parameters we are likely to be
2473 : * called with. If we expect a good cache hit ratio, then we can set our
737 drowley 2474 : * costs to account for that hit ratio, plus a little bit of cost for the
2475 : * caching itself. Caching will not work out well if we expect to be called
2476 : * with too many distinct parameter values. The worst case here is that we
2477 : * never see any parameter value twice, in which case we'd never get a cache
2478 : * hit and caching would be a complete waste of effort.
2479 : */
2480 : static void
634 drowley 2481 CBC 89794 : cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
634 drowley 2482 ECB : Cost *rescan_startup_cost, Cost *rescan_total_cost)
737 2483 : {
2484 : EstimationInfo estinfo;
2485 : ListCell *lc;
634 drowley 2486 GIC 89794 : Cost input_startup_cost = mpath->subpath->startup_cost;
2487 89794 : Cost input_total_cost = mpath->subpath->total_cost;
2488 89794 : double tuples = mpath->subpath->rows;
2489 89794 : double calls = mpath->calls;
2490 89794 : int width = mpath->subpath->pathtarget->width;
2491 :
2492 : double hash_mem_bytes;
2493 : double est_entry_bytes;
2494 : double est_cache_entries;
2495 : double ndistinct;
2496 : double evict_ratio;
2497 : double hit_ratio;
2498 : Cost startup_cost;
2499 : Cost total_cost;
737 drowley 2500 ECB :
2501 : /* available cache space */
623 tgl 2502 GIC 89794 : hash_mem_bytes = get_hash_memory_limit();
2503 :
2504 : /*
737 drowley 2505 ECB : * Set the number of bytes each cache entry should consume in the cache.
2506 : * To provide us with better estimations on how many cache entries we can
2507 : * store at once, we make a call to the executor here to ask it what
2508 : * memory overheads there are for a single cache entry.
2509 : */
737 drowley 2510 GIC 89794 : est_entry_bytes = relation_byte_size(tuples, width) +
2511 89794 : ExecEstimateCacheEntryOverheadBytes(tuples);
2512 :
2513 : /* include the estimated width for the cache keys */
20 drowley 2514 GNC 189220 : foreach(lc, mpath->param_exprs)
2515 99426 : est_entry_bytes += get_expr_width(root, (Node *) lfirst(lc));
2516 :
2517 : /* estimate on the upper limit of cache entries we can hold at once */
737 drowley 2518 GIC 89794 : est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
2519 :
2520 : /* estimate on the distinct number of parameter values */
634 2521 89794 : ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
2522 : &estinfo);
737 drowley 2523 ECB :
2524 : /*
2525 : * When the estimation fell back on using a default value, it's a bit too
2526 : * risky to assume that it's ok to use a Memoize node. The use of a
2527 : * default could cause us to use a Memoize node when it's really
2528 : * inappropriate to do so. If we see that this has been done, then we'll
2529 : * assume that every call will have unique parameters, which will almost
2530 : * certainly mean a MemoizePath will never survive add_path().
2531 : */
737 drowley 2532 CBC 89794 : if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
737 drowley 2533 GIC 6052 : ndistinct = calls;
2534 :
737 drowley 2535 ECB : /*
2536 : * Since we've already estimated the maximum number of entries we can
2537 : * store at once and know the estimated number of distinct values we'll be
2538 : * called with, we'll take this opportunity to set the path's est_entries.
2539 : * This will ultimately determine the hash table size that the executor
2540 : * will use. If we leave this at zero, the executor will just choose the
2541 : * size itself. Really this is not the right place to do this, but it's
2542 : * convenient since everything is already calculated.
2543 : */
634 drowley 2544 GIC 89794 : mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
2545 : PG_UINT32_MAX);
2546 :
2547 : /*
2548 : * When the number of distinct parameter values is above the amount we can
2549 : * store in the cache, then we'll have to evict some entries from the
2550 : * cache. This is not free. Here we estimate how often we'll incur the
2551 : * cost of that eviction.
2552 : */
737 drowley 2553 CBC 89794 : evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
737 drowley 2554 ECB :
2555 : /*
2556 : * In order to estimate how costly a single scan will be, we need to
2557 : * attempt to estimate what the cache hit ratio will be. To do that we
2558 : * must look at how many scans are estimated in total for this node and
2559 : * how many of those scans we expect to get a cache hit.
2560 : */
18 drowley 2561 GNC 179588 : hit_ratio = ((calls - ndistinct) / calls) *
2562 89794 : (est_cache_entries / Max(ndistinct, est_cache_entries));
2563 :
2564 89794 : Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2565 :
2566 : /*
2567 : * Set the total_cost accounting for the expected cache hit ratio. We
2568 : * also add on a cpu_operator_cost to account for a cache lookup. This
2569 : * will happen regardless of whether it's a cache hit or not.
2570 : */
737 drowley 2571 GIC 89794 : total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2572 :
737 drowley 2573 ECB : /* Now adjust the total cost to account for cache evictions */
2574 :
2575 : /* Charge a cpu_tuple_cost for evicting the actual cache entry */
737 drowley 2576 GIC 89794 : total_cost += cpu_tuple_cost * evict_ratio;
2577 :
2578 : /*
2579 : * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2580 : * The per-tuple eviction is really just a pfree, so charging a whole
737 drowley 2581 ECB : * cpu_operator_cost seems a little excessive.
2582 : */
737 drowley 2583 GIC 89794 : total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
737 drowley 2584 ECB :
2585 : /*
2586 : * Now adjust for storing things in the cache, since that's not free
2587 : * either. Everything must go in the cache. We don't proportion this
2588 : * over any ratio, just apply it once for the scan. We charge a
2589 : * cpu_tuple_cost for the creation of the cache entry and also a
2590 : * cpu_operator_cost for each tuple we expect to cache.
2591 : */
737 drowley 2592 GIC 89794 : total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2593 :
2594 : /*
2595 : * Getting the first row must also be proportioned according to the
737 drowley 2596 ECB : * expected cache hit ratio.
2597 : */
737 drowley 2598 GIC 89794 : startup_cost = input_startup_cost * (1.0 - hit_ratio);
2599 :
2600 : /*
2601 : * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2602 : * which we'll do regardless of whether it was a cache hit or not.
737 drowley 2603 ECB : */
737 drowley 2604 GIC 89794 : startup_cost += cpu_tuple_cost;
2605 :
2606 89794 : *rescan_startup_cost = startup_cost;
2607 89794 : *rescan_total_cost = total_cost;
2608 89794 : }
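/*
 * Worked example of the hit-ratio estimate above, with hypothetical
 * numbers: for calls = 1000, ndistinct = 10, and est_cache_entries = 100,
 * hit_ratio = ((1000 - 10) / 1000) * (100 / 100) = 0.99 and evict_ratio =
 * 0, so the rescan total cost comes to about 1% of the subpath's total
 * cost plus the fixed per-lookup and caching charges.
 */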
2609 :
2610 : /*
2611 : * cost_agg
7444 tgl 2612 ECB : * Determines and returns the cost of performing an Agg plan node,
2613 : * including the cost of its input.
2614 : *
2615 : * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2616 : * we are using a hashed Agg node just to do grouping).
2617 : *
2618 : * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2619 : * are for appropriately-sorted input.
2620 : */
2621 : void
6517 tgl 2622 GIC 29989 : cost_agg(Path *path, PlannerInfo *root,
2623 : AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
7444 tgl 2624 ECB : int numGroupCols, double numGroups,
2625 : List *quals,
2626 : Cost input_startup_cost, Cost input_total_cost,
1117 jdavis 2627 : double input_tuples, double input_width)
7444 tgl 2628 : {
2629 : double output_tuples;
2630 : Cost startup_cost;
2631 : Cost total_cost;
2632 : AggClauseCosts dummy_aggcosts;
2633 :
2634 : /* Use all-zero per-aggregate costs if NULL is passed */
4368 tgl 2635 GIC 29989 : if (aggcosts == NULL)
2636 : {
2637 5701 : Assert(aggstrategy == AGG_HASHED);
267 peter 2638 34206 : MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
4368 tgl 2639 5701 : aggcosts = &dummy_aggcosts;
2640 : }
2641 :
7444 tgl 2642 ECB : /*
2643 : * The transCost.per_tuple component of aggcosts should be charged once
2644 : * per input tuple, corresponding to the costs of evaluating the aggregate
2645 : * transfns and their input expressions. The finalCost.per_tuple component
2646 : * is charged once per output tuple, corresponding to the costs of
2647 : * evaluating the finalfns. Startup costs are of course charged but once.
2648 : *
2649 : * If we are grouping, we charge an additional cpu_operator_cost per
2650 : * grouping column per input tuple for grouping comparisons.
2651 : *
2652 : * We will produce a single output tuple if not grouping, and a tuple per
2653 : * group otherwise. We charge cpu_tuple_cost for each output tuple.
2654 : *
6347 bruce 2655 : * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2656 : * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2657 : * input path is already sorted appropriately, AGG_SORTED should be
2658 : * preferred (since it has no risk of memory overflow). This will happen
2659 : * as long as the computed total costs are indeed exactly equal --- but if
2660 : * there's roundoff error we might do the wrong thing. So be sure that
2661 : * the computations below form the same intermediate values in the same
2662 : * order.
2663 : */
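	/*
	 * A hypothetical example of the charging scheme above: 1000 input
	 * tuples grouped on 2 columns into 10 groups charge
	 * transCost.per_tuple 1000 times, cpu_operator_cost 2000 times for
	 * the grouping comparisons, finalCost.per_tuple 10 times, and
	 * cpu_tuple_cost 10 times for emitting the groups.  AGG_SORTED and
	 * AGG_HASHED accumulate exactly these terms; they differ only in how
	 * much of the sum lands in startup_cost.
	 */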
7444 tgl 2664 GIC 29989 : if (aggstrategy == AGG_PLAIN)
2665 : {
2666 15454 : startup_cost = input_total_cost;
4368 2667 15454 : startup_cost += aggcosts->transCost.startup;
2668 15454 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1520 2669 15454 : startup_cost += aggcosts->finalCost.startup;
2670 15454 : startup_cost += aggcosts->finalCost.per_tuple;
2671 : /* we aren't grouping */
6434 2672 15454 : total_cost = startup_cost + cpu_tuple_cost;
4090 2673 15454 : output_tuples = 1;
2674 : }
2204 rhodiumtoad 2675 14535 : else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2676 : {
2677 : /* Here we are able to deliver output on-the-fly */
7444 tgl 2678 5045 : startup_cost = input_startup_cost;
2679 5045 : total_cost = input_total_cost;
2204 rhodiumtoad 2680 5045 : if (aggstrategy == AGG_MIXED && !enable_hashagg)
2681 : {
2682 228 : startup_cost += disable_cost;
2683 228 : total_cost += disable_cost;
2204 rhodiumtoad 2684 ECB : }
2685 : /* calcs phrased this way to match HASHED case, see note above */
4368 tgl 2686 CBC 5045 : total_cost += aggcosts->transCost.startup;
2687 5045 : total_cost += aggcosts->transCost.per_tuple * input_tuples;
2688 5045 : total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1520 2689 5045 : total_cost += aggcosts->finalCost.startup;
2690 5045 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
6434 tgl 2691 GIC 5045 : total_cost += cpu_tuple_cost * numGroups;
4090 tgl 2692 CBC 5045 : output_tuples = numGroups;
7444 tgl 2693 ECB : }
2694 : else
2695 : {
2696 : /* must be AGG_HASHED */
7444 tgl 2697 GIC 9490 : startup_cost = input_total_cost;
2575 rhaas 2698 CBC 9490 : if (!enable_hashagg)
2699 687 : startup_cost += disable_cost;
4368 tgl 2700 9490 : startup_cost += aggcosts->transCost.startup;
4368 tgl 2701 GIC 9490 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1117 jdavis 2702 ECB : /* cost of computing hash value */
4368 tgl 2703 CBC 9490 : startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1520 tgl 2704 GIC 9490 : startup_cost += aggcosts->finalCost.startup;
2705 :
7444 tgl 2706 CBC 9490 : total_cost = startup_cost;
1520 2707 9490 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
1117 jdavis 2708 ECB : /* cost of retrieving from hash table */
6434 tgl 2709 CBC 9490 : total_cost += cpu_tuple_cost * numGroups;
4090 2710 9490 : output_tuples = numGroups;
7444 tgl 2711 ECB : }
2712 :
2713 : /*
2714 : * Add the disk costs of hash aggregation that spills to disk.
2715 : *
2716 : * Groups that go into the hash table stay in memory until finalized, so
1060 2717 : * spilling and reprocessing tuples doesn't incur additional invocations
2718 : * of transCost or finalCost. Furthermore, the computed hash value is
2719 : * stored with the spilled tuples, so we don't incur extra invocations of
2720 : * the hash function.
1117 jdavis 2721 : *
2722 : * Hash Agg begins returning tuples after the first batch is complete.
1060 tgl 2723 : * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2724 : * accrue reads only to total_cost.
2725 : */
1117 jdavis 2726 CBC 29989 : if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
1117 jdavis 2727 ECB : {
2728 : double pages;
1060 tgl 2729 CBC 9912 : double pages_written = 0.0;
2730 9912 : double pages_read = 0.0;
2731 : double spill_cost;
2732 : double hashentrysize;
2733 : double nbatches;
2734 : Size mem_limit;
2735 : uint64 ngroups_limit;
2736 : int num_partitions;
2737 : int depth;
2738 :
2739 : /*
2740 : * Estimate number of batches based on the computed limits. If less
2741 : * than or equal to one, all groups are expected to fit in memory;
2742 : * otherwise we expect to spill.
2743 : */
866 heikki.linnakangas 2744 GIC 9912 : hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2745 : input_width,
1060 tgl 2746 CBC 9912 : aggcosts->transitionSpace);
1117 jdavis 2747 GIC 9912 : hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2748 : &ngroups_limit, &num_partitions);
1117 jdavis 2749 ECB :
1060 tgl 2750 CBC 9912 : nbatches = Max((numGroups * hashentrysize) / mem_limit,
2751 : numGroups / ngroups_limit);
2752 :
1107 jdavis 2753 GIC 9912 : nbatches = Max(ceil(nbatches), 1.0);
2754 9912 : num_partitions = Max(num_partitions, 2);
2755 :
2756 : /*
2757 : * The number of partitions can change at different levels of
2758 : * recursion; but for the purposes of this calculation assume it stays
2759 : * constant.
2760 : */
1060 tgl 2761 9912 : depth = ceil(log(nbatches) / log(num_partitions));
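
		/*
		 * For instance, with hypothetical values nbatches = 64 and
		 * num_partitions = 4, depth = ceil(log(64) / log(4)) = 3: each
		 * spilled tuple is expected to be written out and read back
		 * three times.
		 */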
2762 :
2763 : /*
1117 jdavis 2764 ECB : * Estimate number of pages read and written. For each level of
2765 : * recursion, a tuple must be written and then later read.
2766 : */
1107 jdavis 2767 CBC 9912 : pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
1107 jdavis 2768 GIC 9912 : pages_written = pages_read = pages * depth;
2769 :
944 jdavis 2770 ECB : /*
2771 : * HashAgg has somewhat worse IO behavior than Sort on typical
2772 : * hardware/OS combinations. Account for this with a generic penalty.
2773 : */
944 jdavis 2774 CBC 9912 : pages_read *= 2.0;
944 jdavis 2775 GIC 9912 : pages_written *= 2.0;
2776 :
1117 2777 9912 : startup_cost += pages_written * random_page_cost;
2778 9912 : total_cost += pages_written * random_page_cost;
2779 9912 : total_cost += pages_read * seq_page_cost;
2780 :
944 jdavis 2781 ECB : /* account for CPU cost of spilling a tuple and reading it back */
944 jdavis 2782 GIC 9912 : spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2783 9912 : startup_cost += spill_cost;
2784 9912 : total_cost += spill_cost;
2785 : }
2786 :
1984 tgl 2787 ECB : /*
2788 : * If there are quals (HAVING quals), account for their cost and
2789 : * selectivity.
2790 : */
1984 tgl 2791 GIC 29989 : if (quals)
2792 : {
2793 : QualCost qual_cost;
1984 tgl 2794 ECB :
1984 tgl 2795 CBC 1852 : cost_qual_eval(&qual_cost, quals, root);
1984 tgl 2796 GIC 1852 : startup_cost += qual_cost.startup;
1984 tgl 2797 CBC 1852 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
1984 tgl 2798 ECB :
1984 tgl 2799 CBC 1852 : output_tuples = clamp_row_est(output_tuples *
1984 tgl 2800 GIC 1852 : clauselist_selectivity(root,
2801 : quals,
1984 tgl 2802 ECB : 0,
2803 : JOIN_INNER,
2804 : NULL));
2805 : }
2806 :
4090 tgl 2807 GIC 29989 : path->rows = output_tuples;
7444 2808 29989 : path->startup_cost = startup_cost;
2809 29989 : path->total_cost = total_cost;
2810 29989 : }
7444 tgl 2811 ECB :
2812 : /*
2813 : * cost_windowagg
2814 : * Determines and returns the cost of performing a WindowAgg plan node,
5215 2815 : * including the cost of its input.
2816 : *
2817 : * Input is assumed already properly sorted.
2818 : */
2819 : void
5215 tgl 2820 CBC 1155 : cost_windowagg(Path *path, PlannerInfo *root,
2821 : List *windowFuncs, int numPartCols, int numOrderCols,
2822 : Cost input_startup_cost, Cost input_total_cost,
2823 : double input_tuples)
2824 : {
2825 : Cost startup_cost;
2826 : Cost total_cost;
4368 tgl 2827 ECB : ListCell *lc;
5215 2828 :
5215 tgl 2829 CBC 1155 : startup_cost = input_startup_cost;
2830 1155 : total_cost = input_total_cost;
2831 :
2832 : /*
2833 : * Window functions are assumed to cost their stated execution cost, plus
2834 : * the cost of evaluating their input expressions, per tuple. Since they
2835 : * may in fact evaluate their inputs at multiple rows during each cycle,
2836 : * this could be a drastic underestimate; but without a way to know how
2837 : * many rows the window function will fetch, it's hard to do better. In
2838 : * any case, it's a good estimate for all the built-in window functions,
2839 : * so we'll just do this for now.
4368 tgl 2840 ECB : */
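	/*
	 * For example, at the default cost parameters a window function whose
	 * stated cost is one cpu_operator_cost (0.0025) and whose argument
	 * expression also costs 0.0025 per row contributes 0.005 to
	 * total_cost for every input row (hypothetical numbers, for
	 * illustration only).
	 */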
4368 tgl 2841 GIC 2580 : foreach(lc, windowFuncs)
2842 : {
2190 2843 1425 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
2844 : Cost wfunccost;
2845 : QualCost argcosts;
2846 :
1520 2847 1425 : argcosts.startup = argcosts.per_tuple = 0;
2848 1425 : add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
1520 tgl 2849 ECB : &argcosts);
1520 tgl 2850 CBC 1425 : startup_cost += argcosts.startup;
1520 tgl 2851 GIC 1425 : wfunccost = argcosts.per_tuple;
2852 :
2853 : /* also add the input expressions' cost to per-input-row costs */
4368 2854 1425 : cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
2855 1425 : startup_cost += argcosts.startup;
2856 1425 : wfunccost += argcosts.per_tuple;
2857 :
2858 : /*
2859 : * Add the filter's cost to per-input-row costs. XXX We should reduce
2860 : * input expression costs according to filter selectivity.
3554 noah 2861 ECB : */
3554 noah 2862 GIC 1425 : cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
3554 noah 2863 CBC 1425 : startup_cost += argcosts.startup;
3554 noah 2864 GIC 1425 : wfunccost += argcosts.per_tuple;
2865 :
4368 tgl 2866 1425 : total_cost += wfunccost * input_tuples;
4368 tgl 2867 ECB : }
2868 :
2869 : /*
2870 : * We also charge cpu_operator_cost per grouping column per tuple for
2871 : * grouping comparisons, plus cpu_tuple_cost per tuple for general
2872 : * overhead.
2873 : *
2874 : * XXX this neglects costs of spooling the data to disk when it overflows
2875 : * work_mem. Sooner or later that should get accounted for.
5215 2876 : */
4368 tgl 2877 GIC 1155 : total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
5215 2878 1155 : total_cost += cpu_tuple_cost * input_tuples;
2879 :
4090 2880 1155 : path->rows = input_tuples;
5215 2881 1155 : path->startup_cost = startup_cost;
5215 tgl 2882 CBC 1155 : path->total_cost = total_cost;
2883 1155 : }
5215 tgl 2884 ECB :
2885 : /*
7444 2886 : * cost_group
2887 : * Determines and returns the cost of performing a Group plan node,
2888 : * including the cost of its input.
2889 : *
2890 : * Note: caller must ensure that input costs are for appropriately-sorted
2891 : * input.
2892 : */
2893 : void
6517 tgl 2894 GIC 2172 : cost_group(Path *path, PlannerInfo *root,
2895 : int numGroupCols, double numGroups,
2896 : List *quals,
7444 tgl 2897 ECB : Cost input_startup_cost, Cost input_total_cost,
2898 : double input_tuples)
2899 : {
1984 2900 : double output_tuples;
7444 2901 : Cost startup_cost;
2902 : Cost total_cost;
2903 :
1984 tgl 2904 GIC 2172 : output_tuples = numGroups;
7444 2905 2172 : startup_cost = input_startup_cost;
2906 2172 : total_cost = input_total_cost;
2907 :
2908 : /*
2909 :          * Charge one cpu_operator_cost per comparison per input tuple.  We assume
2910 :          * all grouping columns get compared for most of the input tuples.
2911 : */
2912 2172 : total_cost += cpu_operator_cost * input_tuples * numGroupCols;
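
	/*
	 * E.g. (hypothetical numbers): 10000 input tuples grouped on 3
	 * columns add 10000 * 3 * cpu_operator_cost = 75 cost units at the
	 * default cpu_operator_cost of 0.0025.
	 */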
2913 :
1984 tgl 2914 ECB : /*
2915 : * If there are quals (HAVING quals), account for their cost and
2916 : * selectivity.
2917 : */
1984 tgl 2918 GIC 2172 : if (quals)
2919 : {
2920 : QualCost qual_cost;
2921 :
1984 tgl 2922 UIC 0 : cost_qual_eval(&qual_cost, quals, root);
2923 0 : startup_cost += qual_cost.startup;
1984 tgl 2924 LBC 0 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
1984 tgl 2925 ECB :
1984 tgl 2926 LBC 0 : output_tuples = clamp_row_est(output_tuples *
1984 tgl 2927 UIC 0 : clauselist_selectivity(root,
2928 : quals,
2929 : 0,
2930 : JOIN_INNER,
2931 : NULL));
1984 tgl 2932 ECB : }
2933 :
1984 tgl 2934 GIC 2172 : path->rows = output_tuples;
7444 2935 2172 : path->startup_cost = startup_cost;
2936 2172 : path->total_cost = total_cost;
2937 2172 : }
9770 scrappy 2938 ECB :
2939 : /*
2940 : * initial_cost_nestloop
2941 : * Preliminary estimate of the cost of a nestloop join path.
4090 tgl 2942 EUB : *
2943 : * This must quickly produce lower-bound estimates of the path's startup and
2944 : * total costs. If we are unable to eliminate the proposed path from
2945 : * consideration using the lower bounds, final_cost_nestloop will be called
2946 : * to obtain the final estimates.
2947 : *
2948 : * The exact division of labor between this function and final_cost_nestloop
2949 : * is private to them, and represents a tradeoff between speed of the initial
2950 :  * estimate and getting a tight lower bound.  We choose not to examine the
2951 : * join quals here, since that's by far the most expensive part of the
2952 : * calculations. The end result is that CPU-cost considerations must be
2953 : * left for the second phase; and for SEMI/ANTI joins, we must also postpone
2867 tgl 2954 ECB : * incorporation of the inner path's run cost.
4090 2955 : *
2956 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
2957 : * other data to be used by final_cost_nestloop
2958 : * 'jointype' is the type of join to be performed
2959 : * 'outer_path' is the outer input to the join
2960 : * 'inner_path' is the inner input to the join
2961 : * 'extra' contains miscellaneous information about the join
2962 : */
2963 : void
4090 tgl 2964 GIC 920500 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
2965 : JoinType jointype,
2966 : Path *outer_path, Path *inner_path,
2967 : JoinPathExtraData *extra)
2968 : {
8454 2969 920500 : Cost startup_cost = 0;
2970 920500 : Cost run_cost = 0;
4090 2971 920500 : double outer_path_rows = outer_path->rows;
2972 : Cost inner_rescan_start_cost;
2973 : Cost inner_rescan_total_cost;
2974 : Cost inner_run_cost;
2975 : Cost inner_rescan_run_cost;
2976 :
2977 : /* estimate costs to rescan the inner relation */
4957 2978 920500 : cost_rescan(root, inner_path,
2979 : &inner_rescan_start_cost,
2980 : &inner_rescan_total_cost);
2981 :
2982 : /* cost of source data */
2983 :
8454 tgl 2984 ECB : /*
2985 : * NOTE: clearly, we must pay both outer and inner paths' startup_cost
2986 : * before we can start returning tuples, so the join's startup cost is
2987 : * their sum. We'll also pay the inner path's rescan startup cost
2988 : * multiple times.
2989 : */
8454 tgl 2990 CBC 920500 : startup_cost += outer_path->startup_cost + inner_path->startup_cost;
2991 920500 : run_cost += outer_path->total_cost - outer_path->startup_cost;
4957 tgl 2992 GIC 920500 : if (outer_path_rows > 1)
2993 625074 : run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
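
	/*
	 * Illustration with hypothetical numbers: 100 outer rows against an
	 * inner path whose rescan startup cost is 0.5 add 99 * 0.5 = 49.5
	 * here, before any inner run costs are counted below.
	 */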
2994 :
5083 2995 920500 : inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
4957 2996 920500 : inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
2997 :
2193 tgl 2998 CBC 920500 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
2193 tgl 2999 GIC 893407 : extra->inner_unique)
3000 : {
3001 : /*
3002 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3003 : * executor will stop after the first match.
3004 : *
3005 : * Getting decent estimates requires inspection of the join quals,
3006 : * which we choose to postpone to final_cost_nestloop.
3007 : */
3008 :
3009 : /* Save private data for final_cost_nestloop */
2867 tgl 3010 CBC 430167 : workspace->inner_run_cost = inner_run_cost;
3011 430167 : workspace->inner_rescan_run_cost = inner_rescan_run_cost;
4090 tgl 3012 ECB : }
3013 : else
3014 : {
3015 : /* Normal case; we'll scan whole input rel for each outer row */
4090 tgl 3016 CBC 490333 : run_cost += inner_run_cost;
4090 tgl 3017 GIC 490333 : if (outer_path_rows > 1)
4090 tgl 3018 CBC 339776 : run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
4090 tgl 3019 ECB : }
3020 :
3021 : /* CPU costs left for later */
3022 :
3023 : /* Public result fields */
4090 tgl 3024 GIC 920500 : workspace->startup_cost = startup_cost;
3025 920500 : workspace->total_cost = startup_cost + run_cost;
3026 : /* Save private data for final_cost_nestloop */
3027 920500 : workspace->run_cost = run_cost;
3028 920500 : }
3029 :
4090 tgl 3030 ECB : /*
3031 : * final_cost_nestloop
3032 : * Final estimate of the cost and result size of a nestloop join path.
3033 : *
3034 : * 'path' is already filled in except for the rows and cost fields
3035 : * 'workspace' is the result from initial_cost_nestloop
2193 3036 : * 'extra' contains miscellaneous information about the join
4090 3037 : */
3038 : void
4090 tgl 3039 GIC 444183 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
3040 : JoinCostWorkspace *workspace,
3041 : JoinPathExtraData *extra)
3042 : {
609 peter 3043 444183 : Path *outer_path = path->jpath.outerjoinpath;
609 peter 3044 CBC 444183 : Path *inner_path = path->jpath.innerjoinpath;
4090 tgl 3045 444183 : double outer_path_rows = outer_path->rows;
4090 tgl 3046 GIC 444183 : double inner_path_rows = inner_path->rows;
4090 tgl 3047 CBC 444183 : Cost startup_cost = workspace->startup_cost;
3048 444183 : Cost run_cost = workspace->run_cost;
3049 : Cost cpu_per_tuple;
3050 : QualCost restrict_qual_cost;
3051 : double ntuples;
3052 :
3053 : /* Protect some assumptions below that rowcounts aren't zero */
902 drowley 3054 GIC 444183 : if (outer_path_rows <= 0)
2570 tgl 3055 UIC 0 : outer_path_rows = 1;
902 drowley 3056 GIC 444183 : if (inner_path_rows <= 0)
2570 tgl 3057 282 : inner_path_rows = 1;
3058 : /* Mark the path with the correct row estimate */
609 peter 3059 CBC 444183 : if (path->jpath.path.param_info)
609 peter 3060 GIC 9741 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3061 : else
3062 434442 : path->jpath.path.rows = path->jpath.path.parent->rows;
4090 tgl 3063 ECB :
2277 rhaas 3064 : /* For partial paths, scale row estimate. */
609 peter 3065 CBC 444183 : if (path->jpath.path.parallel_workers > 0)
2216 rhaas 3066 ECB : {
609 peter 3067 CBC 3674 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
2216 rhaas 3068 ECB :
609 peter 3069 GIC 3674 : path->jpath.path.rows =
3070 3674 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3071 : }
3072 :
3073 : /*
4090 tgl 3074 ECB : * We could include disable_cost in the preliminary estimate, but that
4090 tgl 3075 EUB : * would amount to optimizing for the case where the join method is
4090 tgl 3076 ECB : * disabled, which doesn't seem like the way to bet.
3077 : */
4090 tgl 3078 GIC 444183 : if (!enable_nestloop)
4090 tgl 3079 CBC 1536 : startup_cost += disable_cost;
4090 tgl 3080 ECB :
3081 : /* cost of inner-relation source data (we already dealt with outer rel) */
3082 :
609 peter 3083 GIC 444183 : if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
2193 tgl 3084 428364 : extra->inner_unique)
4090 tgl 3085 CBC 287577 : {
3086 : /*
2193 tgl 3087 ECB : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3088 : * executor will stop after the first match.
4090 3089 : */
2867 tgl 3090 CBC 287577 : Cost inner_run_cost = workspace->inner_run_cost;
2867 tgl 3091 GIC 287577 : Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3092 : double outer_matched_rows;
3093 : double outer_unmatched_rows;
3094 : Selectivity inner_scan_frac;
3095 :
3096 : /*
3097 : * For an outer-rel row that has at least one match, we can expect the
2867 tgl 3098 ECB : * inner scan to stop after a fraction 1/(match_count+1) of the inner
3099 : * rows, if the matches are evenly distributed. Since they probably
3100 : * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3101 : * that fraction. (If we used a larger fuzz factor, we'd have to
3102 : * clamp inner_scan_frac to at most 1.0; but since match_count is at
3103 : * least 1, no such clamp is needed now.)
3104 : */
2193 tgl 3105 CBC 287577 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
2136 tgl 3106 GIC 287577 : outer_unmatched_rows = outer_path_rows - outer_matched_rows;
2193 3107 287577 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
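
		/*
		 * Hypothetical illustration: with outer_match_frac = 0.2,
		 * match_count = 3 and 1000 outer rows, outer_matched_rows = 200,
		 * outer_unmatched_rows = 800 and inner_scan_frac = 2.0 / 4.0 =
		 * 0.5, i.e. each matched outer row is expected to read about
		 * half of the inner rows before stopping.
		 */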
3108 :
3109 : /*
2867 tgl 3110 ECB : * Compute number of tuples processed (not number emitted!). First,
3111 : * account for successfully-matched outer rows.
3112 : */
5083 tgl 3113 GIC 287577 : ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
3114 :
3115 : /*
3116 : * Now we need to estimate the actual costs of scanning the inner
3117 : * relation, which may be quite a bit less than N times inner_run_cost
3118 : * due to early scan stops. We consider two cases. If the inner path
3119 : * is an indexscan using all the joinquals as indexquals, then an
3120 : * unmatched outer row results in an indexscan returning no rows,
3121 : * which is probably quite cheap. Otherwise, the executor will have
3122 : * to scan the whole inner rel for an unmatched row; not so cheap.
3123 : */
4007 3124 287577 : if (has_indexed_join_quals(path))
5083 tgl 3125 ECB : {
2867 3126 : /*
3127 : * Successfully-matched outer rows will only require scanning
3128 : * inner_scan_frac of the inner relation. In this case, we don't
3129 : * need to charge the full inner_run_cost even when that's more
3130 : * than inner_rescan_run_cost, because we can assume that none of
3131 : * the inner scans ever scan the whole inner relation. So it's
3132 : * okay to assume that all the inner scan executions can be
3133 : * fractions of the full cost, even if materialization is reducing
3134 : * the rescan cost. At this writing, it's impossible to get here
3135 : * for a materialized inner scan, so inner_run_cost and
3136 : * inner_rescan_run_cost will be the same anyway; but just in
3137 : * case, use inner_run_cost for the first matched tuple and
3138 : * inner_rescan_run_cost for additional ones.
3139 : */
2867 tgl 3140 GIC 53553 : run_cost += inner_run_cost * inner_scan_frac;
3141 53553 : if (outer_matched_rows > 1)
3142 5128 : run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3143 :
2867 tgl 3144 ECB : /*
3145 : * Add the cost of inner-scan executions for unmatched outer rows.
3146 : * We estimate this as the same cost as returning the first tuple
3147 : * of a nonempty scan. We consider that these are all rescans,
3148 : * since we used inner_run_cost once already.
3149 : */
2136 tgl 3150 GIC 53553 : run_cost += outer_unmatched_rows *
4957 3151 53553 : inner_rescan_run_cost / inner_path_rows;
3152 :
3153 : /*
3154 : * We won't be evaluating any quals at all for unmatched rows, so
3155 : * don't add them to ntuples.
3156 : */
3157 : }
3158 : else
3159 : {
2867 tgl 3160 ECB : /*
3161 : * Here, a complicating factor is that rescans may be cheaper than
3162 : * first scans. If we never scan all the way to the end of the
3163 : * inner rel, it might be (depending on the plan type) that we'd
3164 : * never pay the whole inner first-scan run cost. However it is
3165 : * difficult to estimate whether that will happen (and it could
3166 : * not happen if there are any unmatched outer rows!), so be
3167 : * conservative and always charge the whole first-scan cost once.
3168 : * We consider this charge to correspond to the first unmatched
3169 : * outer row, unless there isn't one in our estimate, in which
2136 3170 : * case blame it on the first matched row.
2867 3171 : */
3172 :
3173 : /* First, count all unmatched join tuples as being processed */
2136 tgl 3174 GIC 234024 : ntuples += outer_unmatched_rows * inner_path_rows;
3175 :
3176 : /* Now add the forced full scan, and decrement appropriate count */
2867 3177 234024 : run_cost += inner_run_cost;
2136 3178 234024 : if (outer_unmatched_rows >= 1)
3179 227885 : outer_unmatched_rows -= 1;
3180 : else
3181 6139 : outer_matched_rows -= 1;
3182 :
3183 : /* Add inner run cost for additional outer tuples having matches */
3184 234024 : if (outer_matched_rows > 0)
3185 84771 : run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3186 :
3187 : /* Add inner run cost for additional unmatched outer tuples */
3188 234024 : if (outer_unmatched_rows > 0)
3189 149068 : run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3190 : }
3191 : }
3192 : else
3193 : {
4090 tgl 3194 ECB : /* Normal-case source costs were included in preliminary estimate */
3195 :
3196 : /* Compute number of tuples processed (not number emitted!) */
5083 tgl 3197 CBC 156606 : ntuples = outer_path_rows * inner_path_rows;
5083 tgl 3198 ECB : }
8491 3199 :
3200 : /* CPU costs */
609 peter 3201 CBC 444183 : cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
7392 tgl 3202 GIC 444183 : startup_cost += restrict_qual_cost.startup;
3203 444183 : cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
8454 tgl 3204 CBC 444183 : run_cost += cpu_per_tuple * ntuples;
8454 tgl 3205 ECB :
3206 : /* tlist eval costs are paid per output row, not per tuple scanned */
609 peter 3207 GIC 444183 : startup_cost += path->jpath.path.pathtarget->cost.startup;
609 peter 3208 CBC 444183 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
2607 tgl 3209 ECB :
609 peter 3210 GIC 444183 : path->jpath.path.startup_cost = startup_cost;
3211 444183 : path->jpath.path.total_cost = startup_cost + run_cost;
9770 scrappy 3212 444183 : }
3213 :
3214 : /*
3215 : * initial_cost_mergejoin
3216 : * Preliminary estimate of the cost of a mergejoin path.
4090 tgl 3217 ECB : *
3218 : * This must quickly produce lower-bound estimates of the path's startup and
3219 : * total costs. If we are unable to eliminate the proposed path from
3220 : * consideration using the lower bounds, final_cost_mergejoin will be called
3221 : * to obtain the final estimates.
3222 : *
3223 : * The exact division of labor between this function and final_cost_mergejoin
3224 : * is private to them, and represents a tradeoff between speed of the initial
3225 :  * estimate and getting a tight lower bound.  We choose not to examine the
3226 :  * join quals here, except for obtaining the scan selectivity estimate, which
3227 : * is really essential (but fortunately, use of caching keeps the cost of
3228 : * getting that down to something reasonable).
3229 : * We also assume that cost_sort is cheap enough to use here.
3230 : *
3231 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3232 : * other data to be used by final_cost_mergejoin
3233 : * 'jointype' is the type of join to be performed
3234 : * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3235 : * 'outer_path' is the outer input to the join
3236 : * 'inner_path' is the inner input to the join
3237 : * 'outersortkeys' is the list of sort keys for the outer path
3238 : * 'innersortkeys' is the list of sort keys for the inner path
3239 : * 'extra' contains miscellaneous information about the join
3240 : *
3241 : * Note: outersortkeys and innersortkeys should be NIL if no explicit
3242 : * sort is needed because the respective source path is already ordered.
3243 : */
3244 : void
4090 tgl 3245 GIC 430011 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3246 : JoinType jointype,
3247 : List *mergeclauses,
3248 : Path *outer_path, Path *inner_path,
3249 : List *outersortkeys, List *innersortkeys,
3250 : JoinPathExtraData *extra)
3251 : {
8454 3252 430011 : Cost startup_cost = 0;
3253 430011 : Cost run_cost = 0;
4090 3254 430011 : double outer_path_rows = outer_path->rows;
3255 430011 : double inner_path_rows = inner_path->rows;
3256 : Cost inner_run_cost;
3257 : double outer_rows,
3258 : inner_rows,
3259 : outer_skip_rows,
3260 : inner_skip_rows;
3261 : Selectivity outerstartsel,
3262 : outerendsel,
3263 : innerstartsel,
3264 : innerendsel;
8454 tgl 3265 ECB : Path sort_path; /* dummy for result of cost_sort */
3266 :
3267 : /* Protect some assumptions below that rowcounts aren't zero */
902 drowley 3268 GIC 430011 : if (outer_path_rows <= 0)
5494 tgl 3269 48 : outer_path_rows = 1;
902 drowley 3270 430011 : if (inner_path_rows <= 0)
5494 tgl 3271 63 : inner_path_rows = 1;
5494 tgl 3272 ECB :
7709 3273 : /*
6579 3274 : * A merge join will stop as soon as it exhausts either input stream
3275 : * (unless it's an outer join, in which case the outer side has to be
3276 : * scanned all the way anyway). Estimate fraction of the left and right
3277 : * inputs that will actually need to be scanned. Likewise, we can
3278 : * estimate the number of rows that will be skipped before the first join
3279 : * pair is found, which should be factored into startup cost. We use only
3280 : * the first (most significant) merge clause for this purpose. Since
3281 : * mergejoinscansel() is a fairly expensive computation, we cache the
3282 : * results in the merge clause RestrictInfo.
3283 : */
4090 tgl 3284 GIC 430011 : if (mergeclauses && jointype != JOIN_FULL)
7709 3285 427070 : {
5923 3286 427070 : RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3287 : List *opathkeys;
5923 tgl 3288 ECB : List *ipathkeys;
5624 bruce 3289 : PathKey *opathkey;
3290 : PathKey *ipathkey;
5921 tgl 3291 : MergeScanSelCache *cache;
3292 :
3293 : /* Get the input pathkeys to determine the sort-order details */
5923 tgl 3294 GIC 427070 : opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3295 427070 : ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3296 427070 : Assert(opathkeys);
3297 427070 : Assert(ipathkeys);
3298 427070 : opathkey = (PathKey *) linitial(opathkeys);
3299 427070 : ipathkey = (PathKey *) linitial(ipathkeys);
3300 : /* debugging check */
3301 427070 : if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
4404 3302 427070 : opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
5923 3303 427070 : opathkey->pk_strategy != ipathkey->pk_strategy ||
5923 tgl 3304 CBC 427070 : opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
5923 tgl 3305 LBC 0 : elog(ERROR, "left and right pathkeys do not match in mergejoin");
5923 tgl 3306 ECB :
3307 : /* Get the selectivity with caching */
5921 tgl 3308 GIC 427070 : cache = cached_scansel(root, firstclause, opathkey);
3309 :
5923 3310 427070 : if (bms_is_subset(firstclause->left_relids,
3311 427070 : outer_path->parent->relids))
3312 : {
3313 : /* left side of clause is outer */
5601 tgl 3314 CBC 223601 : outerstartsel = cache->leftstartsel;
3315 223601 : outerendsel = cache->leftendsel;
3316 223601 : innerstartsel = cache->rightstartsel;
3317 223601 : innerendsel = cache->rightendsel;
6942 tgl 3318 ECB : }
3319 : else
3320 : {
3321 : /* left side of clause is inner */
5601 tgl 3322 CBC 203469 : outerstartsel = cache->rightstartsel;
3323 203469 : outerendsel = cache->rightendsel;
3324 203469 : innerstartsel = cache->leftstartsel;
5601 tgl 3325 GBC 203469 : innerendsel = cache->leftendsel;
3326 : }
4090 tgl 3327 GIC 427070 : if (jointype == JOIN_LEFT ||
4090 tgl 3328 ECB : jointype == JOIN_ANTI)
3329 : {
5601 tgl 3330 CBC 84058 : outerstartsel = 0.0;
3331 84058 : outerendsel = 1.0;
3332 : }
4 tgl 3333 GNC 343012 : else if (jointype == JOIN_RIGHT ||
3334 : jointype == JOIN_RIGHT_ANTI)
5601 tgl 3335 ECB : {
5601 tgl 3336 CBC 83776 : innerstartsel = 0.0;
3337 83776 : innerendsel = 1.0;
5601 tgl 3338 ECB : }
3339 : }
3340 : else
3341 : {
3342 : /* cope with clauseless or full mergejoin */
5601 tgl 3343 CBC 2941 : outerstartsel = innerstartsel = 0.0;
3344 2941 : outerendsel = innerendsel = 1.0;
7709 tgl 3345 ECB : }
3346 :
3347 : /*
5601 3348 : * Convert selectivities to row counts. We force outer_rows and
3349 : * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3350 : */
5601 tgl 3351 CBC 430011 : outer_skip_rows = rint(outer_path_rows * outerstartsel);
3352 430011 : inner_skip_rows = rint(inner_path_rows * innerstartsel);
5601 tgl 3353 GIC 430011 : outer_rows = clamp_row_est(outer_path_rows * outerendsel);
5601 tgl 3354 CBC 430011 : inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3355 :
5601 tgl 3356 GIC 430011 : Assert(outer_skip_rows <= outer_rows);
5601 tgl 3357 CBC 430011 : Assert(inner_skip_rows <= inner_rows);
7382 tgl 3358 ECB :
3359 : /*
3360 : * Readjust scan selectivities to account for above rounding. This is
3361 : * normally an insignificant effect, but when there are only a few rows in
3362 : * the inputs, failing to do this makes for a large percentage error.
3363 : */
5601 tgl 3364 CBC 430011 : outerstartsel = outer_skip_rows / outer_path_rows;
3365 430011 : innerstartsel = inner_skip_rows / inner_path_rows;
5601 tgl 3366 GIC 430011 : outerendsel = outer_rows / outer_path_rows;
3367 430011 : innerendsel = inner_rows / inner_path_rows;
3368 :
4118 3369 430011 : Assert(outerstartsel <= outerendsel);
3370 430011 : Assert(innerstartsel <= innerendsel);
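
	/*
	 * Worked example (hypothetical): with 1000 outer rows, outerstartsel
	 * = 0.1 and outerendsel = 0.6, we skip the first 100 outer rows at
	 * startup and stop after 600; below, 0.1 of the outer run cost is
	 * charged to startup_cost and (0.6 - 0.1) = 0.5 of it to run_cost.
	 */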
3371 :
8491 tgl 3372 ECB : /* cost of source data */
8397 bruce 3373 :
8454 tgl 3374 CBC 430011 : if (outersortkeys) /* do we need to sort outer? */
8454 tgl 3375 ECB : {
8454 tgl 3376 GIC 207126 : cost_sort(&sort_path,
7978 tgl 3377 ECB : root,
8454 3378 : outersortkeys,
3379 : outer_path->total_cost,
3380 : outer_path_rows,
2607 tgl 3381 GIC 207126 : outer_path->pathtarget->width,
3382 : 0.0,
3383 : work_mem,
3384 : -1.0);
8454 tgl 3385 CBC 207126 : startup_cost += sort_path.startup_cost;
5601 3386 207126 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3387 207126 : * outerstartsel;
7709 3388 207126 : run_cost += (sort_path.total_cost - sort_path.startup_cost)
5601 tgl 3389 GIC 207126 : * (outerendsel - outerstartsel);
8454 tgl 3390 ECB : }
3391 : else
3392 : {
8454 tgl 3393 GIC 222885 : startup_cost += outer_path->startup_cost;
5601 3394 222885 : startup_cost += (outer_path->total_cost - outer_path->startup_cost)
5601 tgl 3395 CBC 222885 : * outerstartsel;
7709 tgl 3396 GIC 222885 : run_cost += (outer_path->total_cost - outer_path->startup_cost)
5601 tgl 3397 CBC 222885 : * (outerendsel - outerstartsel);
3398 : }
3399 :
8454 tgl 3400 GIC 430011 : if (innersortkeys) /* do we need to sort inner? */
3401 : {
8454 tgl 3402 CBC 339579 : cost_sort(&sort_path,
3403 : root,
3404 : innersortkeys,
3405 : inner_path->total_cost,
7377 tgl 3406 ECB : inner_path_rows,
2607 tgl 3407 CBC 339579 : inner_path->pathtarget->width,
4567 tgl 3408 ECB : 0.0,
3409 : work_mem,
5819 3410 : -1.0);
8454 tgl 3411 GIC 339579 : startup_cost += sort_path.startup_cost;
5601 3412 339579 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
4893 3413 339579 : * innerstartsel;
4893 tgl 3414 CBC 339579 : inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3415 339579 : * (innerendsel - innerstartsel);
8454 tgl 3416 ECB : }
3417 : else
3418 : {
8454 tgl 3419 GIC 90432 : startup_cost += inner_path->startup_cost;
5601 3420 90432 : startup_cost += (inner_path->total_cost - inner_path->startup_cost)
4893 tgl 3421 CBC 90432 : * innerstartsel;
4893 tgl 3422 GIC 90432 : inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
4893 tgl 3423 CBC 90432 : * (innerendsel - innerstartsel);
3424 : }
3425 :
3426 : /*
3427 : * We can't yet determine whether rescanning occurs, or whether
4090 tgl 3428 ECB : * materialization of the inner input should be done. The minimum
3429 : * possible inner input cost, regardless of rescan and materialization
3430 : * considerations, is inner_run_cost. We include that in
3431 : * workspace->total_cost, but not yet in run_cost.
3432 : */
3433 :
3434 : /* CPU costs left for later */
3435 :
3436 : /* Public result fields */
4090 tgl 3437 GIC 430011 : workspace->startup_cost = startup_cost;
3438 430011 : workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3439 : /* Save private data for final_cost_mergejoin */
4090 tgl 3440 CBC 430011 : workspace->run_cost = run_cost;
3441 430011 : workspace->inner_run_cost = inner_run_cost;
3442 430011 : workspace->outer_rows = outer_rows;
3443 430011 : workspace->inner_rows = inner_rows;
3444 430011 : workspace->outer_skip_rows = outer_skip_rows;
4090 tgl 3445 GIC 430011 : workspace->inner_skip_rows = inner_skip_rows;
3446 430011 : }
3447 :
3448 : /*
3449 : * final_cost_mergejoin
3450 : * Final estimate of the cost and result size of a mergejoin path.
3451 : *
3452 : * Unlike other costsize functions, this routine makes two actual decisions:
3453 : * whether the executor will need to do mark/restore, and whether we should
3454 : * materialize the inner path. It would be logically cleaner to build
3455 : * separate paths testing these alternatives, but that would require repeating
3456 : * most of the cost calculations, which are not all that cheap. Since the
3457 : * choice will not affect output pathkeys or startup cost, only total cost,
2193 tgl 3458 ECB : * there is no possibility of wanting to keep more than one path. So it seems
3459 : * best to make the decisions here and record them in the path's
3460 : * skip_mark_restore and materialize_inner fields.
3461 : *
3462 : * Mark/restore overhead is usually required, but can be skipped if we know
3463 : * that the executor need find only one match per outer tuple, and that the
3464 : * mergeclauses are sufficient to identify a match.
3465 : *
3466 : * We materialize the inner path if we need mark/restore and either the inner
3467 : * path can't support mark/restore, or it's cheaper to use an interposed
3468 : * Material node to handle mark/restore.
3469 : *
3470 : * 'path' is already filled in except for the rows and cost fields and
3471 : * skip_mark_restore and materialize_inner
3472 : * 'workspace' is the result from initial_cost_mergejoin
3473 : * 'extra' contains miscellaneous information about the join
3474 : */
3475 : void
4090 tgl 3476 GIC 106831 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3477 : JoinCostWorkspace *workspace,
3478 : JoinPathExtraData *extra)
3479 : {
3480 106831 : Path *outer_path = path->jpath.outerjoinpath;
3481 106831 : Path *inner_path = path->jpath.innerjoinpath;
3482 106831 : double inner_path_rows = inner_path->rows;
3483 106831 : List *mergeclauses = path->path_mergeclauses;
3484 106831 : List *innersortkeys = path->innersortkeys;
3485 106831 : Cost startup_cost = workspace->startup_cost;
3486 106831 : Cost run_cost = workspace->run_cost;
3487 106831 : Cost inner_run_cost = workspace->inner_run_cost;
3488 106831 : double outer_rows = workspace->outer_rows;
3489 106831 : double inner_rows = workspace->inner_rows;
3490 106831 : double outer_skip_rows = workspace->outer_skip_rows;
3491 106831 : double inner_skip_rows = workspace->inner_skip_rows;
3492 : Cost cpu_per_tuple,
3493 : bare_inner_cost,
3494 : mat_inner_cost;
3495 : QualCost merge_qual_cost;
3496 : QualCost qp_qual_cost;
4090 tgl 3497 ECB : double mergejointuples,
3498 : rescannedtuples;
3499 : double rescanratio;
3500 :
902 drowley 3501 : /* Protect some assumptions below that rowcounts aren't zero */
902 drowley 3502 CBC 106831 : if (inner_path_rows <= 0)
4090 tgl 3503 45 : inner_path_rows = 1;
4090 tgl 3504 ECB :
4007 3505 : /* Mark the path with the correct row estimate */
4007 tgl 3506 CBC 106831 : if (path->jpath.path.param_info)
3507 312 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4007 tgl 3508 ECB : else
4007 tgl 3509 CBC 106519 : path->jpath.path.rows = path->jpath.path.parent->rows;
4090 tgl 3510 ECB :
2277 rhaas 3511 : /* For partial paths, scale row estimate. */
2277 rhaas 3512 CBC 106831 : if (path->jpath.path.parallel_workers > 0)
3513 : {
2153 bruce 3514 GIC 4447 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3515 :
2216 rhaas 3516 4447 : path->jpath.path.rows =
3517 4447 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3518 : }
3519 :
3520 : /*
3521 : * We could include disable_cost in the preliminary estimate, but that
3522 : * would amount to optimizing for the case where the join method is
4090 tgl 3523 ECB : * disabled, which doesn't seem like the way to bet.
3524 : */
4090 tgl 3525 GIC 106831 : if (!enable_mergejoin)
4090 tgl 3526 UIC 0 : startup_cost += disable_cost;
4090 tgl 3527 ECB :
3528 : /*
3529 : * Compute cost of the mergequals and qpquals (other restriction clauses)
3530 : * separately.
3531 : */
4090 tgl 3532 GIC 106831 : cost_qual_eval(&merge_qual_cost, mergeclauses, root);
4090 tgl 3533 CBC 106831 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4090 tgl 3534 GIC 106831 : qp_qual_cost.startup -= merge_qual_cost.startup;
4090 tgl 3535 CBC 106831 : qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
3536 :
2193 tgl 3537 ECB : /*
3538 : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3539 : * executor will stop scanning for matches after the first match. When
3540 : * all the joinclauses are merge clauses, this means we don't ever need to
3541 : * back up the merge, and so we can skip mark/restore overhead.
3542 : */
2193 tgl 3543 GIC 106831 : if ((path->jpath.jointype == JOIN_SEMI ||
3544 105505 : path->jpath.jointype == JOIN_ANTI ||
3545 153941 : extra->inner_unique) &&
2193 tgl 3546 CBC 52663 : (list_length(path->jpath.joinrestrictinfo) ==
2193 tgl 3547 GBC 52663 : list_length(path->path_mergeclauses)))
2193 tgl 3548 GIC 45803 : path->skip_mark_restore = true;
3549 : else
3550 61028 : path->skip_mark_restore = false;
3551 :
3552 : /*
3260 bruce 3553 ECB : * Get approx # tuples passing the mergequals. We use approx_tuple_count
4090 tgl 3554 : * here because we need an estimate done with JOIN_INNER semantics.
3555 : */
4090 tgl 3556 CBC 106831 : mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
3557 :
3558 : /*
3559 : * When there are equal merge keys in the outer relation, the mergejoin
3560 : * must rescan any matching tuples in the inner relation. This means
3561 : * re-fetching inner tuples; we have to estimate how often that happens.
3562 : *
3563 : * For regular inner and outer joins, the number of re-fetches can be
4090 tgl 3564 ECB : * estimated approximately as size of merge join output minus size of
3565 : * inner relation. Assume that the distinct key values are 1, 2, ..., and
3566 : * denote the number of values of each key in the outer relation as m1,
3260 bruce 3567 : * m2, ...; in the inner relation, n1, n2, ... Then we have
4090 tgl 3568 : *
3569 : * size of join = m1 * n1 + m2 * n2 + ...
3570 : *
3571 : * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
3572 : * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
3573 : * relation
3574 : *
3575 : * This equation works correctly for outer tuples having no inner match
3576 : * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
3577 : * are effectively subtracting those from the number of rescanned tuples,
3578 : * when we should not. Can we do better without expensive selectivity
3579 : * computations?
3580 : *
3581 : * The whole issue is moot if we are working from a unique-ified outer
3582 : * input, or if we know we don't need to mark/restore at all.
3583 : */
1058 tgl 3584 GIC 106831 : if (IsA(outer_path, UniquePath) || path->skip_mark_restore)
4090 3585 46127 : rescannedtuples = 0;
3586 : else
3587 : {
3588 60704 : rescannedtuples = mergejointuples - inner_path_rows;
3589 : /* Must clamp because of possible underestimate */
3590 60704 : if (rescannedtuples < 0)
3591 26022 : rescannedtuples = 0;
3592 : }
3593 :
3594 : /*
3595 : * We'll inflate various costs this much to account for rescanning. Note
3596 : * that this is to be multiplied by something involving inner_rows, or
3597 : * another number related to the portion of the inner rel we'll scan.
3598 : */
1573 3599 106831 : rescanratio = 1.0 + (rescannedtuples / inner_rows);
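
	/*
	 * Hypothetical example: if the mergequals pass 12000 tuples and the
	 * inner relation has 10000 rows, we expect about 2000 re-fetches;
	 * with inner_rows also 10000 (a full scan), the inner-side costs
	 * below are inflated by rescanratio = 1.0 + 2000 / 10000 = 1.2.
	 */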
3600 :
3601 : /*
3602 : * Decide whether we want to materialize the inner input to shield it from
3603 : * mark/restore and performing re-fetches. Our cost model for regular
3604 : * re-fetches is that a re-fetch costs the same as an original fetch,
4893 tgl 3605 ECB : * which is probably an overestimate; but on the other hand we ignore the
3606 : * bookkeeping costs of mark/restore. Not clear if it's worth developing
3607 : * a more refined model. So we just need to inflate the inner run cost by
3608 : * rescanratio.
3609 : */
4893 tgl 3610 GIC 106831 : bare_inner_cost = inner_run_cost * rescanratio;
4790 bruce 3611 ECB :
4893 tgl 3612 : /*
3613 : * When we interpose a Material node the re-fetch cost is assumed to be
3614 : * just cpu_operator_cost per tuple, independently of the underlying
3615 : * plan's cost; and we charge an extra cpu_operator_cost per original
3616 : * fetch as well. Note that we're assuming the materialize node will
3617 : * never spill to disk, since it only has to remember tuples back to the
3618 : * last mark. (If there are a huge number of duplicates, our other cost
3619 : * factors will make the path so expensive that it probably won't get
4790 bruce 3620 : * chosen anyway.) So we don't use cost_rescan here.
3621 : *
3622 : * Note: keep this estimate in sync with create_mergejoin_plan's labeling
3623 : * of the generated Material node.
3624 : */
4893 tgl 3625 GIC 106831 : mat_inner_cost = inner_run_cost +
1573 3626 106831 : cpu_operator_cost * inner_rows * rescanratio;
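
	/*
	 * Continuing the hypothetical numbers above: with inner_run_cost =
	 * 100, inner_rows = 10000 and rescanratio = 1.2, bare_inner_cost =
	 * 100 * 1.2 = 120 while mat_inner_cost = 100 + 0.0025 * 10000 * 1.2
	 * = 130 at the default cpu_operator_cost, so materializing would not
	 * look cheaper in that case.
	 */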
3627 :
3628 : /*
3629 : * If we don't need mark/restore at all, we don't need materialization.
3630 : */
2193 tgl 3631 CBC 106831 : if (path->skip_mark_restore)
2193 tgl 3632 GIC 45803 : path->materialize_inner = false;
3633 :
3634 : /*
3635 : * Prefer materializing if it looks cheaper, unless the user has asked to
3636 : * suppress materialization.
3637 : */
3638 61028 : else if (enable_material && mat_inner_cost < bare_inner_cost)
4893 3639 1086 : path->materialize_inner = true;
3640 :
3641 : /*
3642 : * Even if materializing doesn't look cheaper, we *must* do it if the
3643 : * inner path is to be used directly (without sorting) and it doesn't
3644 : * support mark/restore.
3645 : *
4893 tgl 3646 ECB : * Since the inner side must be ordered, and only Sorts and IndexScans can
3647 : * create order to begin with, and they both support mark/restore, you
3648 : * might think there's no problem --- but you'd be wrong. Nestloop and
3649 : * merge joins can *preserve* the order of their inputs, so they can be
3650 : * selected as the input of a mergejoin, and they don't support
3651 : * mark/restore at present.
4738 rhaas 3652 : *
4660 bruce 3653 : * We don't test the value of enable_material here, because
3654 : * materialization is required for correctness in this case, and turning
3655 : * it off does not entitle us to deliver an invalid plan.
3656 : */
4893 tgl 3657 GIC 59942 : else if (innersortkeys == NIL &&
3075 rhaas 3658 1858 : !ExecSupportsMarkRestore(inner_path))
4893 tgl 3659 CBC 354 : path->materialize_inner = true;
4790 bruce 3660 ECB :
3661 : /*
3662 : * Also, force materializing if the inner path is to be sorted and the
3663 : * sort is expected to spill to disk. This is because the final merge
3664 : * pass can be done on-the-fly if it doesn't have to support mark/restore.
3665 : * We don't try to adjust the cost estimates for this consideration,
3666 : * though.
3667 : *
3668 : * Since materialization is a performance optimization in this case,
3669 : * rather than necessary for correctness, we skip it if enable_material is
3670 : * off.
3671 : */
4738 rhaas 3672 GIC 59588 : else if (enable_material && innersortkeys != NIL &&
2607 tgl 3673 58060 : relation_byte_size(inner_path_rows,
3674 58060 : inner_path->pathtarget->width) >
4893 3675 58060 : (work_mem * 1024L))
3676 98 : path->materialize_inner = true;
3677 : else
4893 tgl 3678 CBC 59490 : path->materialize_inner = false;
4893 tgl 3679 ECB :
3680 : /* Charge the right incremental cost for the chosen case */
4893 tgl 3681 GIC 106831 : if (path->materialize_inner)
3682 1538 : run_cost += mat_inner_cost;
3683 : else
3684 105293 : run_cost += bare_inner_cost;
3685 :
3686 : /* CPU costs */
3687 :
3688 : /*
3689 : * The number of tuple comparisons needed is approximately number of outer
3690 : * rows plus number of inner rows plus number of rescanned tuples (can we
3691 : * refine this?). At each one, we need to evaluate the mergejoin quals.
3692 : */
7377 tgl 3693 CBC 106831 : startup_cost += merge_qual_cost.startup;
5601 3694 106831 : startup_cost += merge_qual_cost.per_tuple *
3695 106831 : (outer_skip_rows + inner_skip_rows * rescanratio);
7377 3696 106831 : run_cost += merge_qual_cost.per_tuple *
5601 3697 106831 : ((outer_rows - outer_skip_rows) +
5601 tgl 3698 GIC 106831 : (inner_rows - inner_skip_rows) * rescanratio);
7978 tgl 3699 ECB :
3700 : /*
3701 : * For each tuple that gets through the mergejoin proper, we charge
3702 : * cpu_tuple_cost plus the cost of evaluating additional restriction
3260 bruce 3703 : * clauses that are to be applied at the join. (This is pessimistic since
3704 : * not all of the quals may get evaluated at each tuple.)
5083 tgl 3705 : *
3706 : * Note: we could adjust for SEMI/ANTI joins skipping some qual
3707 : * evaluations here, but it's probably not worth the trouble.
3708 : */
7377 tgl 3709 GIC 106831 : startup_cost += qp_qual_cost.startup;
3710 106831 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
5349 3711 106831 : run_cost += cpu_per_tuple * mergejointuples;
3712 :
3713 : /* tlist eval costs are paid per output row, not per tuple scanned */
2607 tgl 3714 CBC 106831 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3715 106831 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
2607 tgl 3716 ECB :
7377 tgl 3717 CBC 106831 : path->jpath.path.startup_cost = startup_cost;
3718 106831 : path->jpath.path.total_cost = startup_cost + run_cost;
9770 scrappy 3719 106831 : }
3720 :
3721 : /*
3722 : * run mergejoinscansel() with caching
3723 : */
3724 : static MergeScanSelCache *
5624 bruce 3725 GIC 427070 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
3726 : {
3727 : MergeScanSelCache *cache;
3728 : ListCell *lc;
3729 : Selectivity leftstartsel,
5601 tgl 3730 ECB : leftendsel,
3731 : rightstartsel,
3732 : rightendsel;
3733 : MemoryContext oldcontext;
3734 :
5921 3735 : /* Do we have this result already? */
5921 tgl 3736 CBC 427091 : foreach(lc, rinfo->scansel_cache)
3737 : {
3738 384912 : cache = (MergeScanSelCache *) lfirst(lc);
3739 384912 : if (cache->opfamily == pathkey->pk_opfamily &&
4404 3740 384912 : cache->collation == pathkey->pk_eclass->ec_collation &&
5921 tgl 3741 GIC 384912 : cache->strategy == pathkey->pk_strategy &&
3742 384891 : cache->nulls_first == pathkey->pk_nulls_first)
3743 384891 : return cache;
3744 : }
3745 :
5921 tgl 3746 ECB : /* Nope, do the computation */
5921 tgl 3747 GIC 42179 : mergejoinscansel(root,
3748 42179 : (Node *) rinfo->clause,
3749 : pathkey->pk_opfamily,
3750 : pathkey->pk_strategy,
3751 42179 : pathkey->pk_nulls_first,
3752 : &leftstartsel,
3753 : &leftendsel,
3754 : &rightstartsel,
3755 : &rightendsel);
3756 :
5921 tgl 3757 ECB : /* Cache the result in suitably long-lived workspace */
5921 tgl 3758 GIC 42179 : oldcontext = MemoryContextSwitchTo(root->planner_cxt);
5921 tgl 3759 ECB :
5921 tgl 3760 CBC 42179 : cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
3761 42179 : cache->opfamily = pathkey->pk_opfamily;
4404 3762 42179 : cache->collation = pathkey->pk_eclass->ec_collation;
5921 3763 42179 : cache->strategy = pathkey->pk_strategy;
3764 42179 : cache->nulls_first = pathkey->pk_nulls_first;
5601 tgl 3765 GIC 42179 : cache->leftstartsel = leftstartsel;
3766 42179 : cache->leftendsel = leftendsel;
3767 42179 : cache->rightstartsel = rightstartsel;
5601 tgl 3768 CBC 42179 : cache->rightendsel = rightendsel;
5921 tgl 3769 ECB :
5921 tgl 3770 GIC 42179 : rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
3771 :
5921 tgl 3772 CBC 42179 : MemoryContextSwitchTo(oldcontext);
3773 :
5921 tgl 3774 GIC 42179 : return cache;
3775 : }
3776 :
3777 : /*
3778 : * initial_cost_hashjoin
4090 tgl 3779 ECB : * Preliminary estimate of the cost of a hashjoin path.
3780 : *
3781 : * This must quickly produce lower-bound estimates of the path's startup and
3782 : * total costs. If we are unable to eliminate the proposed path from
3783 : * consideration using the lower bounds, final_cost_hashjoin will be called
3784 : * to obtain the final estimates.
3785 : *
3786 : * The exact division of labor between this function and final_cost_hashjoin
3787 : * is private to them, and represents a tradeoff between speed of the initial
3788 : * estimate and getting a tight lower bound. We choose to not examine the
3789 : * join quals here (other than by counting the number of hash clauses),
3790 : * so we can't do much with CPU costs. We do assume that
3791 : * ExecChooseHashTableSize is cheap enough to use here.
3792 : *
3793 : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3794 : * other data to be used by final_cost_hashjoin
3795 : * 'jointype' is the type of join to be performed
3796 : * 'hashclauses' is the list of joinclauses to be used as hash clauses
3797 : * 'outer_path' is the outer input to the join
3798 : * 'inner_path' is the inner input to the join
3799 : * 'extra' contains miscellaneous information about the join
3800 : * 'parallel_hash' indicates that inner_path is partial and that a shared
3801 : * hash table will be built in parallel
3802 : */
3803 : void
4090 tgl 3804 GIC 226678 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3805 : JoinType jointype,
3806 : List *hashclauses,
3807 : Path *outer_path, Path *inner_path,
3808 : JoinPathExtraData *extra,
3809 : bool parallel_hash)
3810 : {
8454 3811 226678 : Cost startup_cost = 0;
3812 226678 : Cost run_cost = 0;
4090 3813 226678 : double outer_path_rows = outer_path->rows;
3814 226678 : double inner_path_rows = inner_path->rows;
1936 andres 3815 226678 : double inner_path_rows_total = inner_path_rows;
6888 neilc 3816 226678 : int num_hashclauses = list_length(hashclauses);
3817 : int numbuckets;
3818 : int numbatches;
3819 : int num_skew_mcvs;
3820 : size_t space_allowed; /* unused */
3821 :
3822 : /* cost of source data */
8454 tgl 3823 226678 : startup_cost += outer_path->startup_cost;
3824 226678 : run_cost += outer_path->total_cost - outer_path->startup_cost;
8454 tgl 3825 CBC 226678 : startup_cost += inner_path->total_cost;
3826 :
3827 : /*
3828 : * Cost of computing hash function: must do it once per input tuple. We
3829 : * charge one cpu_operator_cost for each column's hash function. Also,
3830 : * tack on one cpu_tuple_cost per inner row, to model the costs of
3831 : * inserting the row into the hashtable.
7377 tgl 3832 ECB : *
6385 bruce 3833 : * XXX when a hashclause is more complex than a single operator, we really
3834 : * should charge the extra eval costs of the left or right side, as
3835 : * appropriate, here. This seems more work than it's worth at the moment.
7377 tgl 3836 : */
5935 tgl 3837 CBC 226678 : startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
5935 tgl 3838 GIC 226678 : * inner_path_rows;
7377 3839 226678 : run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
3840 :
3841 : /*
3842 : * If this is a parallel hash build, then the value we have for
3843 : * inner_rows_total currently refers only to the rows returned by each
1936 andres 3844 ECB : * participant. For shared hash table size estimation, we need the total
3845 : * number, so we need to undo the division.
3846 : */
1936 andres 3847 GIC 226678 : if (parallel_hash)
3848 5589 : inner_path_rows_total *= get_parallel_divisor(inner_path);
3849 :
3850 : /*
3851 : * Get hash table size that executor would use for inner relation.
3852 : *
3853 : * XXX for the moment, always assume that skew optimization will be
3854 : * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
3855 : * trying to determine that for sure.
3856 : *
3857 : * XXX at some point it might be interesting to try to account for skew
5132 tgl 3858 ECB : * optimization in the cost estimate, but for now, we don't.
3859 : */
1936 andres 3860 CBC 226678 : ExecChooseHashTableSize(inner_path_rows_total,
2607 tgl 3861 GIC 226678 : inner_path->pathtarget->width,
3862 : true, /* useskew */
3863 : parallel_hash, /* try_combined_hash_mem */
3864 : outer_path->parallel_workers,
3865 : &space_allowed,
3866 : &numbuckets,
3867 : &numbatches,
5132 tgl 3868 ECB : &num_skew_mcvs);
4090 3869 :
3870 : /*
3871 : * If inner relation is too big then we will need to "batch" the join,
3872 : * which implies writing and reading most of the tuples to disk an extra
3873 : * time. Charge seq_page_cost per page, since the I/O should be nice and
3874 : * sequential. Writing the inner rel counts as startup cost, all the rest
3875 : * as run cost.
3876 : */
4090 tgl 3877 GIC 226678 : if (numbatches > 1)
3878 : {
3879 2161 : double outerpages = page_size(outer_path_rows,
2607 3880 2161 : outer_path->pathtarget->width);
4090 tgl 3881 CBC 2161 : double innerpages = page_size(inner_path_rows,
2607 3882 2161 : inner_path->pathtarget->width);
3883 :
4090 tgl 3884 GIC 2161 : startup_cost += seq_page_cost * innerpages;
3885 2161 : run_cost += seq_page_cost * (innerpages + 2 * outerpages);
3886 : }
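	/*
	 * Example, at the default seq_page_cost = 1.0: with innerpages = 1000
	 * and outerpages = 2000, batching adds 1000 to startup (writing the
	 * inner rel once) and 1000 + 2 * 2000 = 5000 to run cost (re-reading
	 * the inner rel, plus writing and re-reading the outer rel).
	 */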
3887 :
3888 : /* CPU costs left for later */
3889 :
3890 : /* Public result fields */
3891 226678 : workspace->startup_cost = startup_cost;
3892 226678 : workspace->total_cost = startup_cost + run_cost;
3893 : /* Save private data for final_cost_hashjoin */
3894 226678 : workspace->run_cost = run_cost;
3895 226678 : workspace->numbuckets = numbuckets;
3896 226678 : workspace->numbatches = numbatches;
1936 andres 3897 226678 : workspace->inner_rows_total = inner_path_rows_total;
4090 tgl 3898 CBC 226678 : }
3899 :
4090 tgl 3900 ECB : /*
3901 : * final_cost_hashjoin
3902 : * Final estimate of the cost and result size of a hashjoin path.
3903 : *
3904 : * Note: the numbatches estimate is also saved into 'path' for use later
3905 : *
3906 : * 'path' is already filled in except for the rows and cost fields and
3907 : * num_batches
3908 : * 'workspace' is the result from initial_cost_hashjoin
3909 : * 'extra' contains miscellaneous information about the join
3910 : */
3911 : void
4090 tgl 3912 CBC 97728 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4090 tgl 3913 ECB : JoinCostWorkspace *workspace,
3914 : JoinPathExtraData *extra)
3915 : {
4090 tgl 3916 CBC 97728 : Path *outer_path = path->jpath.outerjoinpath;
3917 97728 : Path *inner_path = path->jpath.innerjoinpath;
3918 97728 : double outer_path_rows = outer_path->rows;
3919 97728 : double inner_path_rows = inner_path->rows;
1936 andres 3920 GIC 97728 : double inner_path_rows_total = workspace->inner_rows_total;
4090 tgl 3921 97728 : List *hashclauses = path->path_hashclauses;
3922 97728 : Cost startup_cost = workspace->startup_cost;
3923 97728 : Cost run_cost = workspace->run_cost;
3924 97728 : int numbuckets = workspace->numbuckets;
3925 97728 : int numbatches = workspace->numbatches;
3926 : Cost cpu_per_tuple;
3927 : QualCost hash_qual_cost;
3928 : QualCost qp_qual_cost;
3929 : double hashjointuples;
3930 : double virtualbuckets;
3931 : Selectivity innerbucketsize;
3932 : Selectivity innermcvfreq;
4090 tgl 3933 ECB : ListCell *hcl;
3934 :
3935 : /* Mark the path with the correct row estimate */
4007 tgl 3936 GIC 97728 : if (path->jpath.path.param_info)
4007 tgl 3937 CBC 474 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4007 tgl 3938 ECB : else
4007 tgl 3939 CBC 97254 : path->jpath.path.rows = path->jpath.path.parent->rows;
4090 tgl 3940 ECB :
2277 rhaas 3941 : /* For partial paths, scale row estimate. */
2277 rhaas 3942 CBC 97728 : if (path->jpath.path.parallel_workers > 0)
2216 rhaas 3943 ECB : {
2153 bruce 3944 CBC 5089 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
2216 rhaas 3945 ECB :
2216 rhaas 3946 CBC 5089 : path->jpath.path.rows =
2216 rhaas 3947 GIC 5089 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3948 : }
3949 :
3950 : /*
3951 : * We could include disable_cost in the preliminary estimate, but that
3952 : * would amount to optimizing for the case where the join method is
3953 : * disabled, which doesn't seem like the way to bet.
3954 : */
4090 tgl 3955 97728 : if (!enable_hashjoin)
3956 102 : startup_cost += disable_cost;
5050 bruce 3957 ECB :
5127 tgl 3958 : /* mark the path with estimated # of batches */
5127 tgl 3959 GIC 97728 : path->num_batches = numbatches;
7405 tgl 3960 ECB :
3961 : /* store the total number of tuples (sum of partial row estimates) */
1936 andres 3962 GIC 97728 : path->inner_rows_total = inner_path_rows_total;
1936 andres 3963 ECB :
3964 : /* and compute the number of "virtual" buckets in the whole join */
2118 tgl 3965 CBC 97728 : virtualbuckets = (double) numbuckets * (double) numbatches;
3966 :
7978 tgl 3967 ECB : /*
2063 3968 : * Determine bucketsize fraction and MCV frequency for the inner relation.
3969 : * We use the smallest bucketsize or MCV frequency estimated for any
3970 : * individual hashclause; this is undoubtedly conservative.
3971 : *
3972 : * BUT: if inner relation has been unique-ified, we can assume it's good
3973 : * for hashing. This is important both because it's the right answer, and
3974 : * because we avoid contaminating the cache with a value that's wrong for
3975 : * non-unique-ified paths.
7978 3976 : */
7376 tgl 3977 CBC 97728 : if (IsA(inner_path, UniquePath))
3978 : {
7376 tgl 3979 GIC 537 : innerbucketsize = 1.0 / virtualbuckets;
2063 tgl 3980 CBC 537 : innermcvfreq = 0.0;
3981 : }
3982 : else
7978 tgl 3983 ECB : {
7376 tgl 3984 GIC 97191 : innerbucketsize = 1.0;
2063 3985 97191 : innermcvfreq = 1.0;
7376 tgl 3986 CBC 204945 : foreach(hcl, hashclauses)
3987 : {
2190 tgl 3988 GIC 107754 : RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
3989 : Selectivity thisbucketsize;
3990 : Selectivity thismcvfreq;
3991 :
3992 : /*
3993 : * First we have to figure out which side of the hashjoin clause
3994 : * is the inner side.
3995 : *
3996 : * Since we tend to visit the same clauses over and over when
3997 : * planning a large query, we cache the bucket stats estimates in
2063 tgl 3998 ECB : * the RestrictInfo node to avoid repeated lookups of statistics.
3999 : */
7365 tgl 4000 CBC 107754 : if (bms_is_subset(restrictinfo->right_relids,
4001 107754 : inner_path->parent->relids))
4002 : {
4003 : /* righthand side is inner */
7376 tgl 4004 GIC 57916 : thisbucketsize = restrictinfo->right_bucketsize;
7376 tgl 4005 CBC 57916 : if (thisbucketsize < 0)
7376 tgl 4006 ECB : {
4007 : /* not cached yet */
2063 tgl 4008 GIC 32400 : estimate_hash_bucket_stats(root,
2063 tgl 4009 CBC 32400 : get_rightop(restrictinfo->clause),
4010 : virtualbuckets,
4011 : &restrictinfo->right_mcvfreq,
4012 : &restrictinfo->right_bucketsize);
2063 tgl 4013 GIC 32400 : thisbucketsize = restrictinfo->right_bucketsize;
4014 : }
4015 57916 : thismcvfreq = restrictinfo->right_mcvfreq;
4016 : }
4017 : else
4018 : {
7365 4019 49838 : Assert(bms_is_subset(restrictinfo->left_relids,
4020 : inner_path->parent->relids));
7376 tgl 4021 ECB : /* lefthand side is inner */
7376 tgl 4022 CBC 49838 : thisbucketsize = restrictinfo->left_bucketsize;
7376 tgl 4023 GIC 49838 : if (thisbucketsize < 0)
4024 : {
7376 tgl 4025 ECB : /* not cached yet */
2063 tgl 4026 CBC 27324 : estimate_hash_bucket_stats(root,
2063 tgl 4027 GIC 27324 : get_leftop(restrictinfo->clause),
4028 : virtualbuckets,
2063 tgl 4029 ECB : &restrictinfo->left_mcvfreq,
4030 : &restrictinfo->left_bucketsize);
2063 tgl 4031 GIC 27324 : thisbucketsize = restrictinfo->left_bucketsize;
4032 : }
4033 49838 : thismcvfreq = restrictinfo->left_mcvfreq;
7435 tgl 4034 ECB : }
4035 :
7376 tgl 4036 CBC 107754 : if (innerbucketsize > thisbucketsize)
7376 tgl 4037 GIC 70901 : innerbucketsize = thisbucketsize;
2063 4038 107754 : if (innermcvfreq > thismcvfreq)
4039 99121 : innermcvfreq = thismcvfreq;
7376 tgl 4040 ECB : }
4041 : }
4042 :
2063 4043 : /*
984 pg 4044 : * If the bucket holding the inner MCV would exceed hash_mem, we don't
4045 : * want to hash unless there is really no other alternative, so apply
4046 : * disable_cost. (The executor normally copes with excessive memory usage
2063 tgl 4047 : * by splitting batches, but obviously it cannot separate equal values
984 pg 4048 : * that way, so it will be unable to drive the batch size below hash_mem
4049 : * when this is true.)
4050 : */
2063 tgl 4051 GIC 97728 : if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
623 tgl 4052 CBC 195456 : inner_path->pathtarget->width) > get_hash_memory_limit())
2063 tgl 4053 UIC 0 : startup_cost += disable_cost;
2063 tgl 4054 ECB :
4055 : /*
4056 : * Compute cost of the hashquals and qpquals (other restriction clauses)
4090 4057 : * separately.
8647 4058 : */
4090 tgl 4059 CBC 97728 : cost_qual_eval(&hash_qual_cost, hashclauses, root);
4060 97728 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4090 tgl 4061 GIC 97728 : qp_qual_cost.startup -= hash_qual_cost.startup;
4062 97728 : qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4063 :
4064 : /* CPU costs */
4065 :
2193 4066 97728 : if (path->jpath.jointype == JOIN_SEMI ||
4067 96784 : path->jpath.jointype == JOIN_ANTI ||
4068 93205 : extra->inner_unique)
5083 4069 43275 : {
4070 : double outer_matched_rows;
4071 : Selectivity inner_scan_frac;
5083 tgl 4072 ECB :
4073 : /*
2193 tgl 4074 EUB : * With a SEMI or ANTI join, or if the innerrel is known unique, the
4075 : * executor will stop after the first match.
4076 : *
4077 : * For an outer-rel row that has at least one match, we can expect the
4078 : * bucket scan to stop after a fraction 1/(match_count+1) of the
4079 : * bucket's rows, if the matches are evenly distributed. Since they
5083 tgl 4080 ECB : * probably aren't quite evenly distributed, we apply a fuzz factor of
4081 : * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4082 : * to clamp inner_scan_frac to at most 1.0; but since match_count is
4083 : * at least 1, no such clamp is needed now.)
4084 : */
2193 tgl 4085 GIC 43275 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4086 43275 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
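		/*
		 * For instance, if outer_match_frac = 0.2 and match_count = 3,
		 * then outer_matched_rows = rint(0.2 * outer_path_rows) and
		 * inner_scan_frac = 2.0 / 4.0 = 0.5: a matched outer row is
		 * expected to scan about half of its bucket before stopping.
		 */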
5083 tgl 4087 ECB :
5083 tgl 4088 CBC 43275 : startup_cost += hash_qual_cost.startup;
4089 86550 : run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4090 43275 : clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
4091 :
4092 : /*
4093 : * For unmatched outer-rel rows, the picture is quite a lot different.
4094 : * In the first place, there is no reason to assume that these rows
4095 : * preferentially hit heavily-populated buckets; instead assume they
4096 : * are uncorrelated with the inner distribution and so they see an
4097 : * average bucket size of inner_path_rows / virtualbuckets. In the
4098 : * second place, it seems likely that they will have few if any exact
4099 : * hash-code matches and so very few of the tuples in the bucket will
4100 : * actually require eval of the hash quals. We don't have any good
4101 : * way to estimate how many will, but for the moment assume that the
4102 : * effective cost per bucket entry is one-tenth what it is for
4103 : * matchable tuples.
4104 : */
5083 tgl 4105 GIC 86550 : run_cost += hash_qual_cost.per_tuple *
5083 tgl 4106 CBC 86550 : (outer_path_rows - outer_matched_rows) *
4107 43275 : clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
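		/*
		 * Note that the 0.05 multiplier is the generic 0.5 hash-qual
		 * discount used elsewhere in this function, further scaled by the
		 * one-tenth factor for unmatched rows described above.
		 */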
4108 :
5083 tgl 4109 ECB : /* Get # of tuples that will pass the basic join */
1730 tgl 4110 CBC 43275 : if (path->jpath.jointype == JOIN_ANTI)
5083 4111 3579 : hashjointuples = outer_path_rows - outer_matched_rows;
4112 : else
1730 tgl 4113 GIC 39696 : hashjointuples = outer_matched_rows;
4114 : }
4115 : else
4116 : {
4117 : /*
4118 : * The number of tuple comparisons needed is the number of outer
4119 : * tuples times the typical number of tuples in a hash bucket, which
4120 : * is the inner relation size times its bucketsize fraction. At each
4121 : * one, we need to evaluate the hashjoin quals. But actually,
4122 : * charging the full qual eval cost at each tuple is pessimistic,
4123 : * since we don't evaluate the quals unless the hash values match
4124 : * exactly. For lack of a better idea, halve the cost estimate to
4125 : * allow for that.
5083 tgl 4126 ECB : */
5083 tgl 4127 CBC 54453 : startup_cost += hash_qual_cost.startup;
4128 108906 : run_cost += hash_qual_cost.per_tuple * outer_path_rows *
5083 tgl 4129 GIC 54453 : clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
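		/*
		 * Worked example: with 1000 outer rows, 10000 inner rows and
		 * innerbucketsize = 0.001, the expected bucket load is
		 * clamp_row_est(10000 * 0.001) = 10, so hash_qual_cost.per_tuple
		 * is charged for 1000 * 10 * 0.5 = 5000 comparisons.
		 */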
4130 :
5083 tgl 4131 ECB : /*
4132 : * Get approx # tuples passing the hashquals. We use
4133 : * approx_tuple_count here because we need an estimate done with
4134 : * JOIN_INNER semantics.
4135 : */
5083 tgl 4136 GIC 54453 : hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4137 : }
4138 :
4139 : /*
4140 : * For each tuple that gets through the hashjoin proper, we charge
4141 : * cpu_tuple_cost plus the cost of evaluating additional restriction
4142 : * clauses that are to be applied at the join. (This is pessimistic since
4143 : * not all of the quals may get evaluated at each tuple.)
4144 : */
7377 4145 97728 : startup_cost += qp_qual_cost.startup;
4146 97728 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
5349 4147 97728 : run_cost += cpu_per_tuple * hashjointuples;
7377 tgl 4148 ECB :
2607 4149 : /* tlist eval costs are paid per output row, not per tuple scanned */
2607 tgl 4150 CBC 97728 : startup_cost += path->jpath.path.pathtarget->cost.startup;
2607 tgl 4151 GIC 97728 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4152 :
7377 4153 97728 : path->jpath.path.startup_cost = startup_cost;
4154 97728 : path->jpath.path.total_cost = startup_cost + run_cost;
8454 4155 97728 : }
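/*
 * Illustrative sketch, not part of costsize.c: a self-contained toy that
 * re-derives the single-batch, plain inner-join CPU charge discussed
 * above, assuming the default cost parameters.  All names below are
 * hypothetical; the real computation also folds in qpquals, tlist costs,
 * batching I/O, and the skew and disable_cost logic handled earlier.
 */
#ifdef COSTSIZE_TOY_EXAMPLE
#include <math.h>
#include <stdio.h>

#define TOY_CPU_OPERATOR_COST 0.0025	/* default cpu_operator_cost */
#define TOY_CPU_TUPLE_COST 0.01	/* default cpu_tuple_cost */

static double
toy_hashjoin_run_cost(double outer_rows, double inner_rows,
					  double innerbucketsize, double hash_qual_per_tuple,
					  double joinrows)
{
	/* expected bucket entries examined per outer tuple, clamped to >= 1 */
	double		bucket_rows = fmax(1.0, rint(inner_rows * innerbucketsize));

	/* halved hash-qual cost per bucket entry, as in final_cost_hashjoin */
	double		run_cost = hash_qual_per_tuple * outer_rows * bucket_rows * 0.5;

	/* plus cpu_tuple_cost for each tuple that survives the join */
	run_cost += TOY_CPU_TUPLE_COST * joinrows;
	return run_cost;
}

int
main(void)
{
	/* 1000 outer rows probing 10000 inner rows, bucketsize 0.001: 17.5 */
	printf("%g\n", toy_hashjoin_run_cost(1000.0, 10000.0, 0.001,
										 TOY_CPU_OPERATOR_COST, 500.0));
	return 0;
}
#endif							/* COSTSIZE_TOY_EXAMPLE */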
4156 :
8454 tgl 4157 ECB :
4158 : /*
4159 : * cost_subplan
4160 : * Figure the costs for a SubPlan (or initplan).
4161 : *
4162 : * Note: we could dig the subplan's Plan out of the root list, but in practice
4163 : * all callers have it handy already, so we make them pass it.
4164 : */
4165 : void
5343 tgl 4166 CBC 18986 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
5343 tgl 4167 ECB : {
4168 : QualCost sp_cost;
4169 :
4170 : /* Figure any cost for evaluating the testexpr */
5343 tgl 4171 CBC 18986 : cost_qual_eval(&sp_cost,
4172 18986 : make_ands_implicit((Expr *) subplan->testexpr),
4173 : root);
5343 tgl 4174 ECB :
5343 tgl 4175 CBC 18986 : if (subplan->useHashTable)
5343 tgl 4176 ECB : {
4177 : /*
4178 : * If we are using a hash table for the subquery outputs, then the
4179 : * cost of evaluating the query is a one-time cost. We charge one
4180 : * cpu_operator_cost per tuple for the work of loading the hashtable,
4181 : * too.
4182 : */
5343 tgl 4183 GIC 895 : sp_cost.startup += plan->total_cost +
4184 895 : cpu_operator_cost * plan->plan_rows;
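		/*
		 * Example: with plan->total_cost = 100, plan->plan_rows = 1000 and
		 * the default cpu_operator_cost = 0.0025, this adds 100 + 2.5 =
		 * 102.5 to the subplan's one-time startup cost.
		 */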
4185 :
4186 : /*
5343 tgl 4187 ECB : * The per-tuple costs include the cost of evaluating the lefthand
4188 : * expressions, plus the cost of probing the hashtable. We already
4189 : * accounted for the lefthand expressions as part of the testexpr, and
4190 : * will also have counted one cpu_operator_cost for each comparison
4191 : * operator. That is probably too low for the probing cost, but it's
5050 bruce 4192 : * hard to make a better estimate, so live with it for now.
5343 tgl 4193 : */
4194 : }
4195 : else
4196 : {
4197 : /*
4198 : * Otherwise we will be rescanning the subplan output on each
4199 : * evaluation. We need to estimate how much of the output we will
4200 : * actually need to scan. NOTE: this logic should agree with the
4201 : * tuple_fraction estimates used by make_subplan() in
4202 : * plan/subselect.c.
4203 : */
5343 tgl 4204 CBC 18091 : Cost plan_run_cost = plan->total_cost - plan->startup_cost;
5343 tgl 4205 ECB :
5343 tgl 4206 GIC 18091 : if (subplan->subLinkType == EXISTS_SUBLINK)
4207 : {
4208 : /* we only need to fetch 1 tuple; clamp to avoid zero divide */
2570 4209 940 : sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4210 : }
5343 4211 17151 : else if (subplan->subLinkType == ALL_SUBLINK ||
4212 17142 : subplan->subLinkType == ANY_SUBLINK)
4213 : {
4214 : /* assume we need 50% of the tuples */
4215 65 : sp_cost.per_tuple += 0.50 * plan_run_cost;
4216 : /* also charge a cpu_operator_cost per row examined */
4217 65 : sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4218 : }
4219 : else
4220 : {
4221 : /* assume we need all tuples */
4222 17086 : sp_cost.per_tuple += plan_run_cost;
4223 : }
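		/*
		 * For example, with plan_run_cost = 200 and plan->plan_rows = 50:
		 * an EXISTS sublink charges 200 / 50 = 4 per call, ALL/ANY charges
		 * 0.5 * 200 + 0.5 * 50 * cpu_operator_cost, and any other sublink
		 * type charges the full 200 per call.
		 */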
4224 :
5343 tgl 4225 ECB : /*
4226 : * Also account for subplan's startup cost. If the subplan is
4957 4227 : * uncorrelated or undirect correlated, AND its topmost node is one
4228 : * that materializes its output, assume that we'll only need to pay
4229 : * its startup cost once; otherwise assume we pay the startup cost
4230 : * every time.
4231 : */
5343 tgl 4232 CBC 26280 : if (subplan->parParam == NIL &&
4957 4233 8189 : ExecMaterializesOutput(nodeTag(plan)))
5343 tgl 4234 GIC 214 : sp_cost.startup += plan->startup_cost;
4235 : else
5343 tgl 4236 CBC 17877 : sp_cost.per_tuple += plan->startup_cost;
4237 : }
5343 tgl 4238 ECB :
5343 tgl 4239 GIC 18986 : subplan->startup_cost = sp_cost.startup;
4240 18986 : subplan->per_call_cost = sp_cost.per_tuple;
4241 18986 : }
4242 :
5343 tgl 4243 ECB :
4244 : /*
4245 : * cost_rescan
4246 : * Given a finished Path, estimate the costs of rescanning it after
4247 : * having done so the first time. For some Path types a rescan is
4248 : * cheaper than an original scan (if no parameters change), and this
4249 : * function embodies knowledge about that. The default is to return
4250 : * the same costs stored in the Path. (Note that the cost estimates
4251 : * actually stored in Paths are always for first scans.)
4252 : *
4957 4253 : * This function is not currently intended to model effects such as rescans
4254 : * being cheaper due to disk block caching; what we are concerned with is
4255 : * plan types wherein the executor caches results explicitly, or doesn't
4256 : * redo startup calculations, etc.
4257 : */
4258 : static void
4957 tgl 4259 GIC 920500 : cost_rescan(PlannerInfo *root, Path *path,
4790 bruce 4260 ECB : Cost *rescan_startup_cost, /* output parameters */
4957 tgl 4261 : Cost *rescan_total_cost)
4262 : {
4957 tgl 4263 GIC 920500 : switch (path->pathtype)
4264 : {
4265 16600 : case T_FunctionScan:
4266 :
4267 : /*
4268 : * Currently, nodeFunctionscan.c always executes the function to
4269 : * completion before returning any rows, and caches the results in
4270 : * a tuplestore. So the function eval cost is all startup cost
4271 : * and isn't paid over again on rescans. However, all run costs
4272 : * will be paid over again.
4273 : */
4274 16600 : *rescan_startup_cost = 0;
4275 16600 : *rescan_total_cost = path->total_cost - path->startup_cost;
4276 16600 : break;
4277 43325 : case T_HashJoin:
4278 :
4279 : /*
2447 tgl 4280 ECB : * If it's a single-batch join, we don't need to rebuild the hash
4281 : * table during a rescan.
4282 : */
2447 tgl 4283 GIC 43325 : if (((HashPath *) path)->num_batches == 1)
2447 tgl 4284 ECB : {
4285 : /* Startup cost is exactly the cost of hash table building */
2447 tgl 4286 CBC 43325 : *rescan_startup_cost = 0;
2447 tgl 4287 GIC 43325 : *rescan_total_cost = path->total_cost - path->startup_cost;
4288 : }
4289 : else
4290 : {
4291 : /* Otherwise, no special treatment */
2447 tgl 4292 UIC 0 : *rescan_startup_cost = path->startup_cost;
4293 0 : *rescan_total_cost = path->total_cost;
4294 : }
4957 tgl 4295 CBC 43325 : break;
4296 2444 : case T_CteScan:
4957 tgl 4297 ECB : case T_WorkTableScan:
4298 : {
4299 : /*
4300 : * These plan types materialize their final result in a
4301 : * tuplestore or tuplesort object. So the rescan cost is only
4302 : * cpu_tuple_cost per tuple, unless the result is large enough
4303 : * to spill to disk.
4304 : */
4090 tgl 4305 GIC 2444 : Cost run_cost = cpu_tuple_cost * path->rows;
4306 2444 : double nbytes = relation_byte_size(path->rows,
2118 tgl 4307 CBC 2444 : path->pathtarget->width);
4790 bruce 4308 2444 : long work_mem_bytes = work_mem * 1024L;
4309 :
4957 tgl 4310 GIC 2444 : if (nbytes > work_mem_bytes)
4311 : {
4312 : /* It will spill, so account for re-read cost */
4957 tgl 4313 GBC 48 : double npages = ceil(nbytes / BLCKSZ);
4957 tgl 4314 EUB :
4957 tgl 4315 GIC 48 : run_cost += seq_page_cost * npages;
4957 tgl 4316 ECB : }
4957 tgl 4317 CBC 2444 : *rescan_startup_cost = 0;
4957 tgl 4318 GIC 2444 : *rescan_total_cost = run_cost;
4319 : }
4320 2444 : break;
4797 4321 310594 : case T_Material:
4322 : case T_Sort:
4323 : {
4324 : /*
4325 : * These plan types not only materialize their results, but do
3260 bruce 4326 ECB : * not implement qual filtering or projection. So they are
4327 : * even cheaper to rescan than the ones above. We charge only
4790 4328 : * cpu_operator_cost per tuple. (Note: keep that in sync with
4329 : * the run_cost charge in cost_sort, and also see comments in
4330 : * cost_material before you change it.)
4797 tgl 4331 : */
4090 tgl 4332 GIC 310594 : Cost run_cost = cpu_operator_cost * path->rows;
4333 310594 : double nbytes = relation_byte_size(path->rows,
2118 tgl 4334 CBC 310594 : path->pathtarget->width);
4790 bruce 4335 GIC 310594 : long work_mem_bytes = work_mem * 1024L;
4797 tgl 4336 ECB :
4797 tgl 4337 GIC 310594 : if (nbytes > work_mem_bytes)
4797 tgl 4338 ECB : {
4339 : /* It will spill, so account for re-read cost */
4797 tgl 4340 GIC 3978 : double npages = ceil(nbytes / BLCKSZ);
4797 tgl 4341 ECB :
4797 tgl 4342 CBC 3978 : run_cost += seq_page_cost * npages;
4343 : }
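				/*
				 * Example, at the defaults work_mem = 4MB, seq_page_cost =
				 * 1.0 and BLCKSZ = 8192: an 8MB materialized result
				 * spills, adding ceil(8388608 / 8192) = 1024 units of
				 * re-read cost on top of the per-row CPU charge.
				 */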
4797 tgl 4344 GIC 310594 : *rescan_startup_cost = 0;
4345 310594 : *rescan_total_cost = run_cost;
4346 : }
4347 310594 : break;
634 drowley 4348 89794 : case T_Memoize:
4349 : /* All the hard work is done by cost_memoize_rescan */
4350 89794 : cost_memoize_rescan(root, (MemoizePath *) path,
4351 : rescan_startup_cost, rescan_total_cost);
737 4352 89794 : break;
4957 tgl 4353 CBC 457743 : default:
4354 457743 : *rescan_startup_cost = path->startup_cost;
4355 457743 : *rescan_total_cost = path->total_cost;
4356 457743 : break;
4357 : }
4358 920500 : }
4359 :
4360 :
8454 tgl 4361 ECB : /*
4362 : * cost_qual_eval
7392 4363 : * Estimate the CPU costs of evaluating a WHERE clause.
4364 : * The input can be either an implicitly-ANDed list of boolean
5921 4365 : * expressions, or a list of RestrictInfo nodes. (The latter is
4366 : * preferred since it allows caching of the results.)
4367 : * The result includes both a one-time (startup) component,
7392 4368 : * and a per-evaluation component.
8454 4369 : */
4370 : void
5890 tgl 4371 CBC 1334909 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4372 : {
5890 tgl 4373 ECB : cost_qual_eval_context context;
6892 neilc 4374 : ListCell *l;
8454 tgl 4375 :
5890 tgl 4376 CBC 1334909 : context.root = root;
4377 1334909 : context.total.startup = 0;
5890 tgl 4378 GIC 1334909 : context.total.per_tuple = 0;
7392 tgl 4379 ECB :
4380 : /* We don't charge any cost for the implicit ANDing at top level ... */
4381 :
8153 tgl 4382 GIC 2470078 : foreach(l, quals)
4383 : {
8053 bruce 4384 1135169 : Node *qual = (Node *) lfirst(l);
4385 :
5890 tgl 4386 1135169 : cost_qual_eval_walker(qual, &context);
4387 : }
4388 :
4389 1334909 : *cost = context.total;
8454 4390 1334909 : }
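/*
 * For instance, a quals list holding one simple operator clause whose
 * implementation function has procost = 1 comes back with startup = 0 and
 * per_tuple = cpu_operator_cost (0.0025 by default), as computed by
 * cost_qual_eval_walker below.
 */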
4391 :
5921 tgl 4392 ECB : /*
4393 : * cost_qual_eval_node
4394 : * As above, for a single RestrictInfo or expression.
4395 : */
4396 : void
5890 tgl 4397 CBC 712599 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
5921 tgl 4398 ECB : {
5890 4399 : cost_qual_eval_context context;
4400 :
5890 tgl 4401 GIC 712599 : context.root = root;
4402 712599 : context.total.startup = 0;
5890 tgl 4403 CBC 712599 : context.total.per_tuple = 0;
4404 :
4405 712599 : cost_qual_eval_walker(qual, &context);
4406 :
4407 712599 : *cost = context.total;
5921 tgl 4408 GIC 712599 : }
4409 :
8454 tgl 4410 ECB : static bool
5624 bruce 4411 CBC 3064025 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4412 : {
8454 tgl 4413 GIC 3064025 : if (node == NULL)
4414 28407 : return false;
4415 :
4416 : /*
4417 : * RestrictInfo nodes contain an eval_cost field reserved for this
5624 bruce 4418 ECB : * routine's use, so that it's not necessary to evaluate the qual clause's
4419 : * cost more than once. If the clause's cost hasn't been computed yet,
4420 : * the field's startup value will contain -1.
4421 : */
5921 tgl 4422 CBC 3035618 : if (IsA(node, RestrictInfo))
5921 tgl 4423 ECB : {
5921 tgl 4424 CBC 1199066 : RestrictInfo *rinfo = (RestrictInfo *) node;
4425 :
4426 1199066 : if (rinfo->eval_cost.startup < 0)
4427 : {
5890 tgl 4428 ECB : cost_qual_eval_context locContext;
4429 :
5890 tgl 4430 GIC 212365 : locContext.root = context->root;
4431 212365 : locContext.total.startup = 0;
5890 tgl 4432 CBC 212365 : locContext.total.per_tuple = 0;
4433 :
5921 tgl 4434 ECB : /*
5624 bruce 4435 : * For an OR clause, recurse into the marked-up tree so that we
4436 : * set the eval_cost for contained RestrictInfos too.
4437 : */
5921 tgl 4438 GIC 212365 : if (rinfo->orclause)
5890 4439 3374 : cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4440 : else
4441 208991 : cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4442 :
5921 tgl 4443 ECB : /*
4444 : * If the RestrictInfo is marked pseudoconstant, it will be tested
4445 : * only once, so treat its cost as all startup cost.
4446 : */
5921 tgl 4447 CBC 212365 : if (rinfo->pseudoconstant)
4448 : {
4449 : /* count one execution during startup */
5890 tgl 4450 GIC 3710 : locContext.total.startup += locContext.total.per_tuple;
5890 tgl 4451 CBC 3710 : locContext.total.per_tuple = 0;
5921 tgl 4452 ECB : }
5890 tgl 4453 CBC 212365 : rinfo->eval_cost = locContext.total;
4454 : }
5890 tgl 4455 GIC 1199066 : context->total.startup += rinfo->eval_cost.startup;
4456 1199066 : context->total.per_tuple += rinfo->eval_cost.per_tuple;
4457 : /* do NOT recurse into children */
5921 4458 1199066 : return false;
5921 tgl 4459 ECB : }
4460 :
4461 : /*
4462 : * For each operator or function node in the given tree, we charge the
4463 : * estimated execution cost given by pg_proc.procost (remember to multiply
4464 : * this by cpu_operator_cost).
4465 : *
4466 : * Vars and Consts are charged zero, and so are boolean operators (AND,
4467 : * OR, NOT). Simplistic, but a lot better than no model at all.
8454 4468 : *
4469 : * Should we try to account for the possibility of short-circuit
4470 : * evaluation of AND/OR? Probably *not*, because that would make the
5921 4471 : * results depend on the clause ordering, and we are not in any position
4472 : * to expect that the current ordering of the clauses is the one that's
4473 : * going to end up being used. The above per-RestrictInfo caching would
4368 4474 : * not mix well with trying to re-order clauses anyway.
4475 : *
3914 4476 : * Another issue that is entirely ignored here is that if a set-returning
4477 : * function is below top level in the tree, the functions/operators above
4478 : * it will need to be evaluated multiple times. In practical use, such
4479 : * cases arise seldom enough that they are not worth the added complexity;
4480 : * moreover, since our rowcount estimates for functions tend to be pretty
4481 : * phony, the results would also be pretty phony.
4482 : */
5921 tgl 4483 GIC 1836552 : if (IsA(node, FuncExpr))
4484 : {
1520 4485 137015 : add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
4486 : &context->total);
4487 : }
5921 4488 1699537 : else if (IsA(node, OpExpr) ||
4489 1469619 : IsA(node, DistinctExpr) ||
4490 1469258 : IsA(node, NullIfExpr))
4491 : {
4492 : /* rely on struct equivalence to treat these all alike */
4493 230346 : set_opfuncid((OpExpr *) node);
1520 4494 230346 : add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
4495 : &context->total);
4496 : }
7224 4497 1469191 : else if (IsA(node, ScalarArrayOpExpr))
4498 : {
6343 4499 16679 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
6031 bruce 4500 16679 : Node *arraynode = (Node *) lsecond(saop->args);
4501 : QualCost sacosts;
4502 : QualCost hcosts;
731 drowley 4503 16679 : int estarraylen = estimate_array_length(arraynode);
6343 tgl 4504 ECB :
5921 tgl 4505 GIC 16679 : set_sa_opfuncid(saop);
1520 tgl 4506 CBC 16679 : sacosts.startup = sacosts.per_tuple = 0;
1520 tgl 4507 GIC 16679 : add_function_cost(context->root, saop->opfuncid, NULL,
4508 : &sacosts);
731 drowley 4509 ECB :
731 drowley 4510 CBC 16679 : if (OidIsValid(saop->hashfuncid))
731 drowley 4511 ECB : {
4512 : /* Handle costs for hashed ScalarArrayOpExpr */
731 drowley 4513 GIC 133 : hcosts.startup = hcosts.per_tuple = 0;
731 drowley 4514 ECB :
731 drowley 4515 CBC 133 : add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
731 drowley 4516 GIC 133 : context->total.startup += sacosts.startup + hcosts.startup;
4517 :
731 drowley 4518 ECB : /* Estimate the cost of building the hashtable. */
731 drowley 4519 GIC 133 : context->total.startup += estarraylen * hcosts.per_tuple;
731 drowley 4520 ECB :
4521 : /*
4522 : * XXX should we charge a little bit for sacosts.per_tuple when
4523 : * building the table, or is it ok to assume there will be zero
4524 : * hash collisions?
4525 : */
4526 :
4527 : /*
4528 : * Charge for hashtable lookups. Charge a single hash and a
4529 : * single comparison.
4530 : */
731 drowley 4531 CBC 133 : context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
4532 : }
4533 : else
731 drowley 4534 ECB : {
4535 : /*
4536 : * Estimate that the operator will be applied to about half of the
4537 : * array elements before the answer is determined.
4538 : */
731 drowley 4539 GIC 16546 : context->total.startup += sacosts.startup;
731 drowley 4540 CBC 33092 : context->total.per_tuple += sacosts.per_tuple *
731 drowley 4541 GIC 16546 : estimate_array_length(arraynode) * 0.5;
4542 : }
4543 : }
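	/*
	 * Example: for "x = ANY(arr)" with an estimated 10-element array, the
	 * non-hashed branch charges about 10 * 0.5 = 5 operator evaluations
	 * per tuple, while the hashed branch pays the table build (10 hash
	 * calls) once at startup plus one hash and one comparison per tuple.
	 */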
4368 tgl 4544 1452512 : else if (IsA(node, Aggref) ||
4545 1428742 : IsA(node, WindowFunc))
4546 : {
4547 : /*
4548 : * Aggref and WindowFunc nodes are (and should be) treated like Vars,
4549 : * ie, zero execution cost in the current model, because they behave
4550 : * essentially like Vars at execution. We disregard the costs of
4551 : * their input expressions for the same reason. The actual execution
4368 tgl 4552 ECB : * costs of the aggregate/window functions and their arguments have to
4553 : * be factored into plan-node-specific costing of the Agg or WindowAgg
4554 : * plan node.
4555 : */
4368 tgl 4556 GIC 25231 : return false; /* don't recurse into children */
4557 : }
384 4558 1427281 : else if (IsA(node, GroupingFunc))
4559 : {
384 tgl 4560 ECB : /* Treat this as having cost 1 */
384 tgl 4561 CBC 175 : context->total.per_tuple += cpu_operator_cost;
4562 175 : return false; /* don't recurse into children */
4563 : }
5787 tgl 4564 GIC 1427106 : else if (IsA(node, CoerceViaIO))
5787 tgl 4565 ECB : {
5787 tgl 4566 CBC 8325 : CoerceViaIO *iocoerce = (CoerceViaIO *) node;
4567 : Oid iofunc;
4568 : Oid typioparam;
4569 : bool typisvarlena;
4570 :
4571 : /* check the result type's input function */
5787 tgl 4572 GIC 8325 : getTypeInputInfo(iocoerce->resulttype,
4573 : &iofunc, &typioparam);
1520 4574 8325 : add_function_cost(context->root, iofunc, NULL,
4575 : &context->total);
4576 : /* check the input type's output function */
5787 tgl 4577 CBC 8325 : getTypeOutputInfo(exprType((Node *) iocoerce->arg),
4578 : &iofunc, &typisvarlena);
1520 4579 8325 : add_function_cost(context->root, iofunc, NULL,
4580 : &context->total);
4581 : }
5857 4582 1418781 : else if (IsA(node, ArrayCoerceExpr))
5857 tgl 4583 ECB : {
5857 tgl 4584 GIC 1950 : ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
2017 tgl 4585 ECB : QualCost perelemcost;
4586 :
2017 tgl 4587 CBC 1950 : cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
4588 : context->root);
2017 tgl 4589 GIC 1950 : context->total.startup += perelemcost.startup;
4590 1950 : if (perelemcost.per_tuple > 0)
4591 24 : context->total.per_tuple += perelemcost.per_tuple *
4592 24 : estimate_array_length((Node *) acoerce->arg);
5857 tgl 4593 ECB : }
6311 tgl 4594 GIC 1416831 : else if (IsA(node, RowCompareExpr))
6311 tgl 4595 ECB : {
4596 : /* Conservatively assume we will check all the columns */
6311 tgl 4597 GIC 78 : RowCompareExpr *rcexpr = (RowCompareExpr *) node;
5921 tgl 4598 ECB : ListCell *lc;
4599 :
5921 tgl 4600 CBC 261 : foreach(lc, rcexpr->opnos)
4601 : {
5624 bruce 4602 GIC 183 : Oid opid = lfirst_oid(lc);
5921 tgl 4603 ECB :
1520 tgl 4604 GIC 183 : add_function_cost(context->root, get_opcode(opid), NULL,
1520 tgl 4605 ECB : &context->total);
4606 : }
4607 : }
2095 tgl 4608 CBC 1416753 : else if (IsA(node, MinMaxExpr) ||
4609 1416651 : IsA(node, XmlExpr) ||
4610 1416306 : IsA(node, CoerceToDomain) ||
220 andrew 4611 1393644 : IsA(node, NextValueExpr))
2095 tgl 4612 ECB : {
4613 : /* Treat all these as having cost 1 */
2095 tgl 4614 CBC 23243 : context->total.per_tuple += cpu_operator_cost;
4615 : }
5781 tgl 4616 GIC 1393510 : else if (IsA(node, CurrentOfExpr))
5781 tgl 4617 ECB : {
4618 : /* Report high cost to prevent selection of anything but TID scan */
5646 tgl 4619 GIC 197 : context->total.startup += disable_cost;
5781 tgl 4620 ECB : }
7392 tgl 4621 GIC 1393313 : else if (IsA(node, SubLink))
7392 tgl 4622 ECB : {
4623 : /* This routine should not be applied to un-planned expressions */
7198 tgl 4624 LBC 0 : elog(ERROR, "cannot handle unplanned sub-select");
4625 : }
7421 tgl 4626 GIC 1393313 : else if (IsA(node, SubPlan))
4627 : {
7423 tgl 4628 ECB : /*
7392 4629 : * A subplan node in an expression typically indicates that the
6385 bruce 4630 : * subplan will be executed on each evaluation, so charge accordingly.
4631 : * (Sub-selects that can be executed as InitPlans have already been
4632 : * removed from the expression.)
4633 : */
7188 bruce 4634 CBC 15397 : SubPlan *subplan = (SubPlan *) node;
4635 :
5343 tgl 4636 15397 : context->total.startup += subplan->startup_cost;
5343 tgl 4637 GIC 15397 : context->total.per_tuple += subplan->per_call_cost;
4638 :
5343 tgl 4639 ECB : /*
4640 : * We don't want to recurse into the testexpr, because it was already
4641 : * counted in the SubPlan node's costs. So we're done.
4642 : */
5343 tgl 4643 GIC 15397 : return false;
5343 tgl 4644 EUB : }
5343 tgl 4645 GIC 1377916 : else if (IsA(node, AlternativeSubPlan))
5343 tgl 4646 ECB : {
4647 : /*
4648 : * Arbitrarily use the first alternative plan for costing. (We should
4649 : * certainly only include one alternative, and we don't yet have
4650 : * enough information to know which one the executor is most likely to
4651 : * use.)
4652 : */
5343 tgl 4653 GIC 762 : AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
7188 bruce 4654 ECB :
5343 tgl 4655 GIC 762 : return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
5343 tgl 4656 ECB : context);
8454 4657 : }
2607 tgl 4658 GIC 1377154 : else if (IsA(node, PlaceHolderVar))
4659 : {
4660 : /*
4661 : * A PlaceHolderVar should be given cost zero when considering general
4662 : * expression evaluation costs. The expense of doing the contained
2607 tgl 4663 ECB : * expression is charged as part of the tlist eval costs of the scan
4664 : * or join where the PHV is first computed (see set_rel_width and
4665 : * add_placeholders_to_joinrel). If we charged it again here, we'd be
4666 : * double-counting the cost for each level of plan that the PHV
4667 : * bubbles up through. Hence, return without recursing into the
4668 : * phexpr.
4669 : */
2607 tgl 4670 GIC 1023 : return false;
4671 : }
4672 :
5921 tgl 4673 ECB : /* recurse into children */
8454 tgl 4674 GIC 1793964 : return expression_tree_walker(node, cost_qual_eval_walker,
5890 tgl 4675 ECB : (void *) context);
4676 : }
4677 :
4007 4678 : /*
4679 : * get_restriction_qual_cost
4680 : * Compute evaluation costs of a baserel's restriction quals, plus any
4681 : * movable join quals that have been pushed down to the scan.
4682 : * Results are returned into *qpqual_cost.
4683 : *
4684 : * This is a convenience subroutine that works for seqscans and other cases
4685 : * where all the given quals will be evaluated the hard way. It's not useful
4686 : * for cost_index(), for example, where the index machinery takes care of
4687 : * some of the quals. We assume baserestrictcost was previously set by
4688 : * set_baserel_size_estimates().
4689 : */
4690 : static void
4007 tgl 4691 GIC 381093 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
4692 : ParamPathInfo *param_info,
4693 : QualCost *qpqual_cost)
4007 tgl 4694 ECB : {
4007 tgl 4695 GIC 381093 : if (param_info)
4696 : {
4697 : /* Include costs of pushed-down clauses */
4698 78594 : cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
4699 :
4700 78594 : qpqual_cost->startup += baserel->baserestrictcost.startup;
4701 78594 : qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
4702 : }
4703 : else
4704 302499 : *qpqual_cost = baserel->baserestrictcost;
4705 381093 : }
4706 :
4707 :
4708 : /*
4709 : * compute_semi_anti_join_factors
4710 : * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
5083 tgl 4711 ECB : * can be expected to scan.
4712 : *
4713 : * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
4714 : * inner rows as soon as it finds a match to the current outer row.
2193 4715 : * The same happens if we have detected the inner rel is unique.
4716 : * We should therefore adjust some of the cost components for this effect.
4717 : * This function computes some estimates needed for these adjustments.
4090 4718 : * These estimates will be the same regardless of the particular paths used
4719 : * for the outer and inner relation, so we compute these once and then pass
4720 : * them to all the join cost estimation functions.
4721 : *
4722 : * Input parameters:
4723 : * joinrel: join relation under consideration
4724 : * outerrel: outer relation under consideration
4725 : * innerrel: inner relation under consideration
4726 : * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
4727 : * sjinfo: SpecialJoinInfo relevant to this join
4728 : * restrictlist: join quals
4729 : * Output parameters:
4730 : * *semifactors is filled in (see pathnodes.h for field definitions)
4731 : */
4732 : void
4090 tgl 4733 GIC 77000 : compute_semi_anti_join_factors(PlannerInfo *root,
4734 : RelOptInfo *joinrel,
4735 : RelOptInfo *outerrel,
4736 : RelOptInfo *innerrel,
4737 : JoinType jointype,
4738 : SpecialJoinInfo *sjinfo,
4739 : List *restrictlist,
4740 : SemiAntiJoinFactors *semifactors)
4741 : {
4742 : Selectivity jselec;
4743 : Selectivity nselec;
4744 : Selectivity avgmatch;
4745 : SpecialJoinInfo norm_sjinfo;
4746 : List *joinquals;
4747 : ListCell *l;
4748 :
4749 : /*
4750 : * In an ANTI join, we must ignore clauses that are "pushed down", since
4751 : * those won't affect the match logic. In a SEMI join, we do not
4752 : * distinguish joinquals from "pushed down" quals, so just use the whole
2193 tgl 4753 ECB : * restrictinfo list. For other outer join types, we should consider only
4754 : * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
4755 : */
2193 tgl 4756 GIC 77000 : if (IS_OUTER_JOIN(jointype))
4757 : {
5083 4758 33349 : joinquals = NIL;
4090 4759 71566 : foreach(l, restrictlist)
4760 : {
2190 4761 38217 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
4762 :
1815 4763 38217 : if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5083 4764 35911 : joinquals = lappend(joinquals, rinfo);
4765 : }
4766 : }
4767 : else
4090 4768 43651 : joinquals = restrictlist;
4769 :
4770 : /*
4771 : * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
4772 : */
5083 4773 77000 : jselec = clauselist_selectivity(root,
4774 : joinquals,
4775 : 0,
2118 tgl 4776 ECB : (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
4777 : sjinfo);
5083 4778 :
4779 : /*
4780 : * Also get the normal inner-join selectivity of the join clauses.
4781 : */
5083 tgl 4782 GIC 77000 : norm_sjinfo.type = T_SpecialJoinInfo;
4090 tgl 4783 CBC 77000 : norm_sjinfo.min_lefthand = outerrel->relids;
4784 77000 : norm_sjinfo.min_righthand = innerrel->relids;
4090 tgl 4785 GIC 77000 : norm_sjinfo.syn_lefthand = outerrel->relids;
4786 77000 : norm_sjinfo.syn_righthand = innerrel->relids;
5083 4787 77000 : norm_sjinfo.jointype = JOIN_INNER;
69 tgl 4788 GNC 77000 : norm_sjinfo.ojrelid = 0;
4789 77000 : norm_sjinfo.commute_above_l = NULL;
4790 77000 : norm_sjinfo.commute_above_r = NULL;
4791 77000 : norm_sjinfo.commute_below = NULL;
5083 tgl 4792 ECB : /* we don't bother trying to make the remaining fields valid */
5083 tgl 4793 GIC 77000 : norm_sjinfo.lhs_strict = false;
2951 4794 77000 : norm_sjinfo.semi_can_btree = false;
4795 77000 : norm_sjinfo.semi_can_hash = false;
2951 tgl 4796 CBC 77000 : norm_sjinfo.semi_operators = NIL;
2951 tgl 4797 GIC 77000 : norm_sjinfo.semi_rhs_exprs = NIL;
4798 :
5083 4799 77000 : nselec = clauselist_selectivity(root,
4800 : joinquals,
4801 : 0,
4802 : JOIN_INNER,
4803 : &norm_sjinfo);
4804 :
5083 tgl 4805 ECB : /* Avoid leaking a lot of ListCells */
2193 tgl 4806 CBC 77000 : if (IS_OUTER_JOIN(jointype))
5083 4807 33349 : list_free(joinquals);
5083 tgl 4808 ECB :
4809 : /*
4810 : * jselec can be interpreted as the fraction of outer-rel rows that have
5050 bruce 4811 : * any matches (this is true for both SEMI and ANTI cases). And nselec is
3260 4812 : * the fraction of the Cartesian product that matches. So, the average
5050 4813 : * number of matches for each outer-rel row that has at least one match is
4814 : * nselec * inner_rows / jselec.
4815 : *
4090 tgl 4816 : * Note: it is correct to use the inner rel's "rows" count here, even
4817 : * though we might later be considering a parameterized inner path with
3260 bruce 4818 : * fewer rows. This is because we have included all the join clauses in
3955 4819 : * the selectivity estimate.
5083 tgl 4820 : */
5083 tgl 4821 GIC 77000 : if (jselec > 0) /* protect against zero divide */
5083 tgl 4822 ECB : {
4090 tgl 4823 GIC 76984 : avgmatch = nselec * innerrel->rows / jselec;
4824 : /* Clamp to sane range */
5083 4825 76984 : avgmatch = Max(1.0, avgmatch);
4826 : }
4827 : else
4828 16 : avgmatch = 1.0;
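	/*
	 * Worked example: if jselec = 0.2, nselec = 0.002 and the inner rel
	 * has 1000 rows, then avgmatch = 0.002 * 1000 / 0.2 = 10 expected
	 * matches per matched outer row.
	 */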
5083 tgl 4829 ECB :
4090 tgl 4830 CBC 77000 : semifactors->outer_match_frac = jselec;
4090 tgl 4831 GIC 77000 : semifactors->match_count = avgmatch;
4832 77000 : }
4833 :
4834 : /*
4835 : * has_indexed_join_quals
4836 : * Check whether all the joinquals of a nestloop join are used as
4837 : * inner index quals.
4838 : *
4839 : * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
4840 : * indexscan) that uses all the joinquals as indexquals, we can assume that an
4841 : * unmatched outer tuple is cheap to process, whereas otherwise it's probably
4842 : * expensive.
4843 : */
4090 tgl 4844 ECB : static bool
609 peter 4845 GIC 287577 : has_indexed_join_quals(NestPath *path)
4090 tgl 4846 ECB : {
609 peter 4847 GIC 287577 : JoinPath *joinpath = &path->jpath;
4007 tgl 4848 CBC 287577 : Relids joinrelids = joinpath->path.parent->relids;
4007 tgl 4849 GIC 287577 : Path *innerpath = joinpath->innerjoinpath;
4850 : List *indexclauses;
4007 tgl 4851 ECB : bool found_one;
4852 : ListCell *lc;
4853 :
4854 : /* If join still has quals to evaluate, it's not fast */
4007 tgl 4855 CBC 287577 : if (joinpath->joinrestrictinfo != NIL)
4007 tgl 4856 GIC 200782 : return false;
4857 : /* Nor if the inner path isn't parameterized at all */
4858 86795 : if (innerpath->param_info == NULL)
4859 2382 : return false;
4860 :
4861 : /* Find the indexclauses list for the inner scan */
4862 84413 : switch (innerpath->pathtype)
4863 : {
4864 54742 : case T_IndexScan:
4865 : case T_IndexOnlyScan:
4866 54742 : indexclauses = ((IndexPath *) innerpath)->indexclauses;
4867 54742 : break;
4007 tgl 4868 CBC 135 : case T_BitmapHeapScan:
4869 : {
3955 bruce 4870 ECB : /* Accept only a simple bitmap scan, not AND/OR cases */
3955 bruce 4871 CBC 135 : Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
3955 bruce 4872 ECB :
3955 bruce 4873 GIC 135 : if (IsA(bmqual, IndexPath))
4874 111 : indexclauses = ((IndexPath *) bmqual)->indexclauses;
4875 : else
4876 24 : return false;
4877 111 : break;
3955 bruce 4878 ECB : }
4007 tgl 4879 CBC 29536 : default:
4880 :
4007 tgl 4881 ECB : /*
4882 : * If it's not a simple indexscan, it probably doesn't run quickly
4883 : * for zero rows out, even if it's a parameterized path using all
4884 : * the joinquals.
4885 : */
4090 tgl 4886 GIC 29536 : return false;
5083 tgl 4887 ECB : }
4888 :
4007 4889 : /*
4890 : * Examine the inner path's param clauses. Any that are from the outer
4891 : * path must be found in the indexclauses list, either exactly or in an
4892 : * equivalent form generated by equivclass.c. Also, we must find at least
4893 : * one such clause, else it's a clauseless join which isn't fast.
4894 : */
4007 tgl 4895 GIC 54853 : found_one = false;
4007 tgl 4896 CBC 109422 : foreach(lc, innerpath->param_info->ppi_clauses)
4090 tgl 4897 ECB : {
4007 tgl 4898 GIC 55869 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
4007 tgl 4899 ECB :
4007 tgl 4900 CBC 55869 : if (join_clause_is_movable_into(rinfo,
4007 tgl 4901 GIC 55869 : innerpath->parent->relids,
4007 tgl 4902 ECB : joinrelids))
4903 : {
1520 tgl 4904 GIC 55869 : if (!is_redundant_with_indexclauses(rinfo, indexclauses))
4007 4905 1300 : return false;
4906 54569 : found_one = true;
4907 : }
4908 : }
4007 tgl 4909 CBC 53553 : return found_one;
4910 : }
4911 :
4912 :
4913 : /*
4914 : * approx_tuple_count
4915 : * Quick-and-dirty estimation of the number of join rows passing
4916 : * a set of qual conditions.
4917 : *
5349 tgl 4918 ECB : * The quals can be either an implicitly-ANDed list of boolean expressions,
4919 : * or a list of RestrictInfo nodes (typically the latter).
4920 : *
5175 4921 : * We intentionally compute the selectivity under JOIN_INNER rules, even
4922 : * if it's some type of outer join. This is appropriate because we are
4923 : * trying to figure out how many tuples pass the initial merge or hash
4924 : * join step.
4925 : *
4926 : * This is quick-and-dirty because we bypass clauselist_selectivity, and
7035 4927 : * simply multiply the independent clause selectivities together. Now
7709 4928 : * clauselist_selectivity often can't do any better than that anyhow, but
7035 4929 : * for some situations (such as range constraints) it is smarter. However,
4930 : * we can't effectively cache the results of clauselist_selectivity, whereas
4931 : * the individual clause selectivities can be and are cached.
7978 4932 : *
4933 : * Since we are only using the results to estimate how many potential
4934 : * output tuples are generated and passed through qpqual checking, it
4935 : * seems OK to live with the approximation.
4936 : */
4937 : static double
5175 tgl 4938 GIC 161284 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
4939 : {
4940 : double tuples;
4090 4941 161284 : double outer_tuples = path->outerjoinpath->rows;
4942 161284 : double inner_tuples = path->innerjoinpath->rows;
4943 : SpecialJoinInfo sjinfo;
5349 4944 161284 : Selectivity selec = 1.0;
4945 : ListCell *l;
4946 :
4947 : /*
4948 : * Make up a SpecialJoinInfo for JOIN_INNER semantics.
4949 : */
5175 4950 161284 : sjinfo.type = T_SpecialJoinInfo;
4951 161284 : sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
4952 161284 : sjinfo.min_righthand = path->innerjoinpath->parent->relids;
4953 161284 : sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
4954 161284 : sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
4955 161284 : sjinfo.jointype = JOIN_INNER;
69 tgl 4956 GNC 161284 : sjinfo.ojrelid = 0;
4957 161284 : sjinfo.commute_above_l = NULL;
4958 161284 : sjinfo.commute_above_r = NULL;
4959 161284 : sjinfo.commute_below = NULL;
4960 : /* we don't bother trying to make the remaining fields valid */
5175 tgl 4961 GIC 161284 : sjinfo.lhs_strict = false;
2951 4962 161284 : sjinfo.semi_can_btree = false;
4963 161284 : sjinfo.semi_can_hash = false;
2951 tgl 4964 CBC 161284 : sjinfo.semi_operators = NIL;
2951 tgl 4965 GIC 161284 : sjinfo.semi_rhs_exprs = NIL;
4966 :
5349 tgl 4967 ECB : /* Get the approximate selectivity */
7978 tgl 4968 CBC 347354 : foreach(l, quals)
4969 : {
4970 186070 : Node *qual = (Node *) lfirst(l);
4971 :
4972 : /* Note that clause_selectivity will be able to cache its result */
2194 simon 4973 GIC 186070 : selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
4974 : }
4975 :
5175 tgl 4976 ECB : /* Apply it to the input relation sizes */
5175 tgl 4977 CBC 161284 : tuples = selec * outer_tuples * inner_tuples;
5349 tgl 4978 ECB :
5349 tgl 4979 CBC 161284 : return clamp_row_est(tuples);
7978 tgl 4980 ECB : }
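/*
 * Example: two cached clause selectivities of 0.1 and 0.05 combine to
 * selec = 0.005, so joining 1000 outer rows with 2000 inner rows gives
 * approx_tuple_count = clamp_row_est(0.005 * 1000 * 2000) = 10000.
 */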
4981 :
4982 :
9345 bruce 4983 : /*
8462 tgl 4984 : * set_baserel_size_estimates
4985 : * Set the size estimates for the given base relation.
4986 : *
4987 : * The rel's targetlist and restrictinfo list must have been constructed
4524 4988 : * already, and rel->tuples must be set.
8462 4989 : *
4990 : * We set the following fields of the rel node:
4991 : * rows: the estimated number of output tuples (after applying
4992 : * restriction clauses).
4993 : * width: the estimated average output tuple width in bytes.
8454 4994 : * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
4995 : */
8491 4996 : void
6517 tgl 4997 GIC 192405 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4998 : {
7034 tgl 4999 ECB : double nrows;
5000 :
5001 : /* Should only be applied to base relations */
7365 tgl 5002 GIC 192405 : Assert(rel->relid > 0);
9345 bruce 5003 ECB :
7034 tgl 5004 GIC 384798 : nrows = rel->tuples *
7035 tgl 5005 CBC 192405 : clauselist_selectivity(root,
5006 : rel->baserestrictinfo,
5007 : 0,
5008 : JOIN_INNER,
5009 : NULL);
5010 :
7034 tgl 5011 GIC 192393 : rel->rows = clamp_row_est(nrows);
5012 :
5890 5013 192393 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5014 :
8491 5015 192393 : set_rel_width(root, rel);
9770 scrappy 5016 192393 : }
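/*
 * For example, a base rel with tuples = 100000 whose restriction clauses
 * have a combined selectivity of 0.0015 ends up with rows =
 * clamp_row_est(150) = 150.
 */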
5017 :
5018 : /*
5019 : * get_parameterized_baserel_size
5020 : * Make a size estimate for a parameterized scan of a base relation.
5021 : *
5022 : * 'param_clauses' lists the additional join clauses to be used.
4007 tgl 5023 ECB : *
5024 : * set_baserel_size_estimates must have been applied already.
5025 : */
5026 : double
4007 tgl 5027 GIC 52958 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
4007 tgl 5028 ECB : List *param_clauses)
5029 : {
5030 : List *allclauses;
5031 : double nrows;
5032 :
5033 : /*
5034 : * Estimate the number of rows returned by the parameterized scan, knowing
5035 : * that it will apply all the extra join clauses as well as the rel's own
5036 : * restriction clauses. Note that we force the clauses to be treated as
5037 : * non-join clauses during selectivity estimation.
5038 : */
1336 tgl 5039 CBC 52958 : allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
4007 tgl 5040 GIC 105916 : nrows = rel->tuples *
4007 tgl 5041 CBC 52958 : clauselist_selectivity(root,
4007 tgl 5042 ECB : allclauses,
2118 tgl 5043 GIC 52958 : rel->relid, /* do not use 0! */
5044 : JOIN_INNER,
5045 : NULL);
4007 5046 52958 : nrows = clamp_row_est(nrows);
5047 : /* For safety, make sure result is not more than the base estimate */
5048 52958 : if (nrows > rel->rows)
4007 tgl 5049 UIC 0 : nrows = rel->rows;
4007 tgl 5050 GIC 52958 : return nrows;
5051 : }
5052 :
9345 bruce 5053 ECB : /*
5054 : * set_joinrel_size_estimates
5055 : * Set the size estimates for the given join relation.
5056 : *
5057 : * The rel's targetlist must have been constructed already, and a
5058 : * restriction clause list that matches the given component rels must
5059 : * be provided.
5060 : *
5061 : * Since there is more than one way to make a joinrel for more than two
5062 : * base relations, the results we get here could depend on which component
5063 : * rel pair is provided. In theory we should get the same answers no matter
5064 : * which pair is provided; in practice, since the selectivity estimation
8462 tgl 5065 : * routines don't handle all cases equally well, we might not. But there's
5066 : * not much to be done about it. (Would it make sense to repeat the
5067 : * calculations for each pair of input rels that's encountered, and somehow
5068 : * average the results? Probably way more trouble than it's worth, and
4007 5069 : * anyway we must keep the rowcount estimate the same for all paths for the
5070 : * joinrel.)
5071 : *
2607 5072 : * We set only the rows field here. The reltarget field was already set by
5073 : * build_joinrel_tlist, and baserestrictcost is not used for join rels.
9770 scrappy 5074 : */
8491 tgl 5075 EUB : void
6517 tgl 5076 CBC 76713 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5077 : RelOptInfo *outer_rel,
5078 : RelOptInfo *inner_rel,
5079 : SpecialJoinInfo *sjinfo,
5080 : List *restrictlist)
5081 : {
4090 tgl 5082 GIC 76713 : rel->rows = calc_joinrel_size_estimate(root,
5083 : rel,
5084 : outer_rel,
5085 : inner_rel,
5086 : outer_rel->rows,
5087 : inner_rel->rows,
5088 : sjinfo,
5089 : restrictlist);
5090 76713 : }
5091 :
5092 : /*
5093 : * get_parameterized_joinrel_size
5094 : * Make a size estimate for a parameterized scan of a join relation.
5095 : *
5096 : * 'rel' is the joinrel under consideration.
5097 : * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5098 : * produce the relations being joined.
5099 : * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5100 : * 'restrict_clauses' lists the join clauses that need to be applied at the
5101 : * join node (including any movable clauses that were moved down to this join,
4007 tgl 5102 ECB : * and not including any movable clauses that were pushed down into the
5103 : * child paths).
5104 : *
5105 : * set_joinrel_size_estimates must have been applied already.
5106 : */
5107 : double
4007 tgl 5108 CBC 2706 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5109 : Path *outer_path,
5110 : Path *inner_path,
5111 : SpecialJoinInfo *sjinfo,
5112 : List *restrict_clauses)
5113 : {
5114 : double nrows;
5115 :
4007 tgl 5116 ECB : /*
5117 : * Estimate the number of rows returned by the parameterized join as the
5118 : * sizes of the input paths times the selectivity of the clauses that have
5119 : * ended up at this join node.
5120 : *
5121 : * As with set_joinrel_size_estimates, the rowcount estimate could depend
5122 : * on the pair of input paths provided, though ideally we'd get the same
5123 : * estimate for any pair with the same parameterization.
5124 : */
4007 tgl 5125 GIC 2706 : nrows = calc_joinrel_size_estimate(root,
5126 : rel,
5127 : outer_path->parent,
5128 : inner_path->parent,
5129 : outer_path->rows,
5130 : inner_path->rows,
5131 : sjinfo,
5132 : restrict_clauses);
5133 : /* For safety, make sure result is not more than the base estimate */
4007 tgl 5134 CBC 2706 : if (nrows > rel->rows)
4007 tgl 5135 GIC 6 : nrows = rel->rows;
5136 2706 : return nrows;
5137 : }
5138 :
5139 : /*
5140 : * calc_joinrel_size_estimate
5141 : * Workhorse for set_joinrel_size_estimates and
5142 : * get_parameterized_joinrel_size.
5143 : *
5144 : * outer_rel/inner_rel are the relations being joined, but they should be
5145 : * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5146 : * than what rel->rows says, when we are considering parameterized paths.
5147 : */
5148 : static double
4090 5149 79419 : calc_joinrel_size_estimate(PlannerInfo *root,
5150 : RelOptInfo *joinrel,
2486 tgl 5151 ECB : RelOptInfo *outer_rel,
5152 : RelOptInfo *inner_rel,
5153 : double outer_rows,
5154 : double inner_rows,
5155 : SpecialJoinInfo *sjinfo,
5156 : List *restrictlist)
5157 : {
5351 tgl 5158 CBC 79419 : JoinType jointype = sjinfo->jointype;
2486 tgl 5159 ECB : Selectivity fkselec;
5994 5160 : Selectivity jselec;
5161 : Selectivity pselec;
5162 : double nrows;
5163 :
5164 : /*
5165 : * Compute joinclause selectivity. Note that we are only considering
5166 : * clauses that become restriction clauses at this join level; we are not
5167 : * double-counting them because they were not considered in estimating the
5168 : * sizes of the component rels.
5169 : *
5170 : * First, see whether any of the joinclauses can be matched to known FK
5171 : * constraints. If so, drop those clauses from the restrictlist, and
5172 : * instead estimate their selectivity using FK semantics. (We do this
2486 5173 : * without regard to whether said clauses are local or "pushed down".
5174 : * Probably, an FK-matching clause could never be seen as pushed down at
5175 : * an outer join, since it would be strict and hence would be grounds for
5176 : * join strength reduction.) fkselec gets the net selectivity for
5177 : * FK-matching clauses, or 1.0 if there are none.
5178 : */
2486 tgl 5179 GIC 79419 : fkselec = get_foreign_key_join_selectivity(root,
5180 : outer_rel->relids,
5181 : inner_rel->relids,
2486 tgl 5182 ECB : sjinfo,
5183 : &restrictlist);
5184 :
5185 : /*
5186 : * For an outer join, we have to distinguish the selectivity of the join's
5187 : * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5188 : * down". For inner joins we just count them all as joinclauses.
5189 : */
5994 tgl 5190 GIC 79419 : if (IS_OUTER_JOIN(jointype))
5191 : {
5192 30975 : List *joinquals = NIL;
5193 30975 : List *pushedquals = NIL;
5194 : ListCell *l;
5195 :
5196 : /* Grovel through the clauses to separate into two lists */
5197 68743 : foreach(l, restrictlist)
5198 : {
2190 5199 37768 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5200 :
1815 5201 37768 : if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5994 5202 3341 : pushedquals = lappend(pushedquals, rinfo);
5994 tgl 5203 ECB : else
5994 tgl 5204 GIC 34427 : joinquals = lappend(joinquals, rinfo);
5205 : }
5206 :
5207 : /* Get the separate selectivities */
2497 5208 30975 : jselec = clauselist_selectivity(root,
5209 : joinquals,
5210 : 0,
5211 : jointype,
5212 : sjinfo);
5994 5213 30975 : pselec = clauselist_selectivity(root,
5994 tgl 5214 ECB : pushedquals,
5215 : 0,
5351 5216 : jointype,
2194 simon 5217 : sjinfo);
5218 :
5219 : /* Avoid leaking a lot of ListCells */
5994 tgl 5220 GIC 30975 : list_free(joinquals);
5994 tgl 5221 CBC 30975 : list_free(pushedquals);
5222 : }
5994 tgl 5223 ECB : else
5224 : {
2497 tgl 5225 CBC 48444 : jselec = clauselist_selectivity(root,
2497 tgl 5226 ECB : restrictlist,
5227 : 0,
5228 : jointype,
5229 : sjinfo);
5994 tgl 5230 GIC 48444 : pselec = 0.0; /* not used, keep compiler quiet */
5231 : }
8491 tgl 5232 ECB :
5233 : /*
5234 : * Basically, we multiply the size of the Cartesian product by the selectivity.
5235 : *
5236 : * If we are doing an outer join, take that into account: the joinqual
5994 5237 : * selectivity has to be clamped using the knowledge that the output must
5238 : * be at least as large as the non-nullable input. However, any
5239 : * pushed-down quals are applied after the outer join, so their
5240 : * selectivity applies fully.
5241 : *
5242 : * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5243 : * of LHS rows that have matches, and we apply that straightforwardly.
8087 5244 : */
8087 tgl 5245 CBC 79419 : switch (jointype)
5246 : {
8087 tgl 5247 GIC 46696 : case JOIN_INNER:
2486 5248 46696 : nrows = outer_rows * inner_rows * fkselec * jselec;
2486 tgl 5249 ECB : /* pselec not used */
8087 tgl 5250 GIC 46696 : break;
5251 26807 : case JOIN_LEFT:
2486 5252 26807 : nrows = outer_rows * inner_rows * fkselec * jselec;
4090 5253 26807 : if (nrows < outer_rows)
4090 tgl 5254 CBC 10778 : nrows = outer_rows;
5994 tgl 5255 GIC 26807 : nrows *= pselec;
8087 5256 26807 : break;
5257 782 : case JOIN_FULL:
2486 5258 782 : nrows = outer_rows * inner_rows * fkselec * jselec;
4090 5259 782 : if (nrows < outer_rows)
5260 516 : nrows = outer_rows;
5261 782 : if (nrows < inner_rows)
5262 55 : nrows = inner_rows;
5994 5263 782 : nrows *= pselec;
8087 5264 782 : break;
5351 5265 1748 : case JOIN_SEMI:
2486 5266 1748 : nrows = outer_rows * fkselec * jselec;
5267 : /* pselec not used */
7384 5268 1748 : break;
5351 tgl 5269 CBC 3386 : case JOIN_ANTI:
2486 tgl 5270 GIC 3386 : nrows = outer_rows * (1.0 - fkselec * jselec);
5351 tgl 5271 CBC 3386 : nrows *= pselec;
7384 5272 3386 : break;
8087 tgl 5273 UIC 0 : default:
5351 tgl 5274 ECB : /* other values not expected here */
7198 tgl 5275 LBC 0 : elog(ERROR, "unrecognized join type: %d", (int) jointype);
7034 tgl 5276 ECB : nrows = 0; /* keep compiler quiet */
8087 5277 : break;
5278 : }
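/*
 * To make the above concrete (hypothetical numbers): a LEFT join with
 * outer_rows = 1000, inner_rows = 100 and jselec = 0.001 yields a raw
 * estimate of 1000 * 100 * 0.001 = 100 rows, which is clamped up to the
 * 1000 non-nullable outer rows; a pushed-down qual with pselec = 0.5
 * then reduces the final estimate to 500 rows.
 */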
5279 :
4090 tgl 5280 CBC 79419 : return clamp_row_est(nrows);
7034 tgl 5281 ECB : }
5282 :
2486 5283 : /*
5284 : * get_foreign_key_join_selectivity
5285 : * Estimate join selectivity for foreign-key-related clauses.
5286 : *
5287 : * Remove any clauses that can be matched to FK constraints from *restrictlist,
5288 : * and return a substitute estimate of their selectivity. 1.0 is returned
5289 : * when there are no such clauses.
5290 : *
5291 : * The reason for treating such clauses specially is that we can get better
5292 : * estimates this way than by relying on clauselist_selectivity(), especially
5293 : * for multi-column FKs where that function's assumption that the clauses are
5294 : * independent falls down badly. But even with single-column FKs, we may be
5295 : * able to get a better answer when the pg_statistic stats are missing or out
5296 : * of date.
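 *
 * For example (hypothetical numbers): given a two-column FK whose
 * columns each have about 1000 distinct values, clauselist_selectivity()
 * would multiply the per-clause estimates to get roughly one in a
 * million, whereas the FK guarantees that each referencing row matches
 * exactly one referenced row, i.e. a selectivity of 1/(size of the
 * referenced table).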
2486 tgl 5297 EUB : */
5298 : static Selectivity
2486 tgl 5299 GBC 79419 : get_foreign_key_join_selectivity(PlannerInfo *root,
5300 : Relids outer_relids,
5301 : Relids inner_relids,
5302 : SpecialJoinInfo *sjinfo,
5303 : List **restrictlist)
2486 tgl 5304 ECB : {
2486 tgl 5305 GIC 79419 : Selectivity fkselec = 1.0;
5306 79419 : JoinType jointype = sjinfo->jointype;
5307 79419 : List *worklist = *restrictlist;
5308 : ListCell *lc;
5309 :
5310 : /* Consider each FK constraint that is known to match the query */
5311 80363 : foreach(lc, root->fkey_list)
5312 : {
5313 944 : ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5314 : bool ref_is_outer;
5315 : List *removedlist;
5316 : ListCell *cell;
5317 :
5318 : /*
5319 : * This FK is not relevant unless it connects a baserel on one side of
5320 : * this join to a baserel on the other side.
5321 : */
5322 1715 : if (bms_is_member(fkinfo->con_relid, outer_relids) &&
2486 tgl 5323 CBC 771 : bms_is_member(fkinfo->ref_relid, inner_relids))
2486 tgl 5324 GIC 675 : ref_is_outer = false;
5325 442 : else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5326 173 : bms_is_member(fkinfo->con_relid, inner_relids))
5327 62 : ref_is_outer = true;
5328 : else
2486 tgl 5329 CBC 207 : continue;
2486 tgl 5330 ECB :
2120 5331 : /*
5332 : * If we're dealing with a semi/anti join, and the FK's referenced
5333 : * relation is on the outside, then knowledge of the FK doesn't help
5334 : * us figure out what we need to know (which is the fraction of outer
5335 : * rows that have matches). On the other hand, if the referenced rel
5336 : * is on the inside, then all outer rows must have matches in the
5337 : * referenced table (ignoring nulls). But any restriction or join
5338 : * clauses that filter that table will reduce the fraction of matches.
5339 : * We can account for restriction clauses, but it's too hard to guess
5340 : * how many table rows would get through a join that's inside the RHS.
5341 : * Hence, if either case applies, punt and ignore the FK.
5342 : */
2120 tgl 5343 GIC 737 : if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
5344 479 : (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
5345 3 : continue;
2120 tgl 5346 ECB :
2486 5347 : /*
5348 : * Modify the restrictlist by removing clauses that match the FK (and
5349 : * putting them into removedlist instead). It seems unsafe to modify
5350 : * the originally-passed List structure, so we make a shallow copy the
5351 : * first time through.
5352 : */
2486 tgl 5353 CBC 734 : if (worklist == *restrictlist)
2486 tgl 5354 GIC 622 : worklist = list_copy(worklist);
5355 :
5356 734 : removedlist = NIL;
1364 5357 1506 : foreach(cell, worklist)
5358 : {
2486 5359 772 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5360 772 : bool remove_it = false;
5361 : int i;
5362 :
5363 : /* Drop this clause if it matches any column of the FK */
5364 965 : for (i = 0; i < fkinfo->nkeys; i++)
5365 : {
5366 950 : if (rinfo->parent_ec)
2486 tgl 5367 ECB : {
5368 : /*
5369 : * EC-derived clauses can only match by EC. It is okay to
5370 : * consider any clause derived from the same EC as
5371 : * matching the FK: even if equivclass.c chose to generate
5372 : * a clause equating some other pair of Vars, it could
5373 : * have generated one equating the FK's Vars. So for
5374 : * purposes of estimation, we can act as though it did so.
5375 : *
5376 : * Note: checking parent_ec is a bit of a cheat because
5377 : * there are EC-derived clauses that don't have parent_ec
5378 : * set; but such clauses must compare expressions that
5379 : * aren't just Vars, so they cannot match the FK anyway.
5380 : */
2486 tgl 5381 CBC 152 : if (fkinfo->eclass[i] == rinfo->parent_ec)
5382 : {
5383 149 : remove_it = true;
5384 149 : break;
5385 : }
5386 : }
5387 : else
2486 tgl 5388 ECB : {
5389 : /*
5390 : * Otherwise, see if rinfo was previously matched to FK as
5391 : * a "loose" clause.
5392 : */
2486 tgl 5393 GIC 798 : if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5394 : {
5395 608 : remove_it = true;
5396 608 : break;
5397 : }
5398 : }
5399 : }
5400 772 : if (remove_it)
5401 : {
1364 5402 757 : worklist = foreach_delete_current(worklist, cell);
2486 5403 757 : removedlist = lappend(removedlist, rinfo);
5404 : }
2486 tgl 5405 ECB : }
5406 :
5407 : /*
5408 : * If we failed to remove all the matching clauses we expected to
5409 : * find, chicken out and ignore this FK; applying its selectivity
5410 : * might result in double-counting. Put any clauses we did manage to
5411 : * remove back into the worklist.
5412 : *
5413 : * Since the matching clauses are known not outerjoin-delayed, they
5414 : * would normally have appeared in the initial joinclause list. If we
5415 : * didn't find them, there are two possibilities:
5416 : *
893 5417 : * 1. If the FK match is based on an EC that is ec_has_const, it won't
5418 : * have generated any join clauses at all. We discount such ECs while
5419 : * checking to see if we have "all" the clauses. (Below, we'll adjust
5420 : * the selectivity estimate for this case.)
5421 : *
5422 : * 2. The clauses were matched to some other FK in a previous
5423 : * iteration of this loop, and thus removed from worklist. (A likely
2486 5424 : * case is that two FKs are matched to the same EC; there will be only
5425 : * one EC-derived clause in the initial list, so the first FK will
5426 : * consume it.) Applying both FKs' selectivity independently risks
5427 : * underestimating the join size; in particular, this would undo one
5428 : * of the main things that ECs were invented for, namely to avoid
5429 : * double-counting the selectivity of redundant equality conditions.
5430 : * Later we might think of a reasonable way to combine the estimates,
5431 : * but for now, just punt, since this is a fairly uncommon situation.
5432 : */
893 tgl 5433 GIC 734 : if (removedlist == NIL ||
5434 591 : list_length(removedlist) !=
5435 591 : (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5436 : {
2486 5437 143 : worklist = list_concat(worklist, removedlist);
5438 143 : continue;
5439 : }
5440 :
5441 : /*
5442 : * Finally we get to the payoff: estimate selectivity using the
5443 : * knowledge that each referencing row will match exactly one row in
5444 : * the referenced table.
5445 : *
5446 : * XXX that's not true in the presence of nulls in the referencing
5447 : * column(s), so in principle we should derate the estimate for those.
5448 : * However (1) if there are any strict restriction clauses for the
5449 : * referencing column(s) elsewhere in the query, derating here would
5450 : * be double-counting the null fraction, and (2) it's not very clear
5451 : * how to combine null fractions for multiple referencing columns. So
5452 : * we do nothing for now about correcting for nulls.
5453 : *
5454 : * XXX another point here is that if either side of an FK constraint
5455 : * is an inheritance parent, we estimate as though the constraint
5456 : * covers all its children as well. This is not an unreasonable
2486 tgl 5457 ECB : * assumption for a referencing table, ie the user probably applied
5458 : * identical constraints to all child tables (though perhaps we ought
5459 : * to check that). But it's not possible to have done that for a
5460 : * referenced table. Fortunately, precisely because that doesn't
5461 : * work, it is uncommon in practice to have an FK referencing a parent
5462 : * table. So, at least for now, disregard inheritance here.
5463 : */
2120 tgl 5464 GIC 591 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
2486 5465 367 : {
5466 : /*
5467 : * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5468 : * referenced table is exactly the inside of the join. The join
5469 : * selectivity is defined as the fraction of LHS rows that have
5470 : * matches. The FK implies that every LHS row has a match *in the
5471 : * referenced table*; but any restriction clauses on it will
5472 : * reduce the number of matches. Hence we take the join
5473 : * selectivity as equal to the selectivity of the table's
5474 : * restriction clauses, which is rows / tuples; but we must guard
5475 : * against tuples == 0.
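 *
 * For instance (hypothetical numbers): a referenced table with 10000
 * tuples whose restriction clauses pass 500 rows contributes a factor
 * of 500/10000 = 0.05 to fkselec.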
5476 : */
2120 5477 367 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5478 367 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5479 :
5480 367 : fkselec *= ref_rel->rows / ref_tuples;
5481 : }
5482 : else
5483 : {
5484 : /*
5485 : * Otherwise, selectivity is exactly 1/referenced-table-size; but
5486 : * guard against tuples == 0. Note we should use the raw table
5487 : * tuple count, not any estimate of its filtered or joined size.
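 *
 * E.g. (hypothetical numbers): a referenced table with 10000 raw
 * tuples contributes a factor of 1/10000 to fkselec, regardless of
 * any filtering applied to it elsewhere in the query.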
2486 tgl 5488 ECB : */
2486 tgl 5489 CBC 224 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
2486 tgl 5490 GIC 224 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5491 :
5492 224 : fkselec *= 1.0 / ref_tuples;
5493 : }
5494 :
5495 : /*
5496 : * If any of the FK columns participated in ec_has_const ECs, then
5497 : * equivclass.c will have generated "var = const" restrictions for
5498 : * each side of the join, thus reducing the sizes of both input
5499 : * relations. Taking the fkselec at face value would amount to
5500 : * double-counting the selectivity of the constant restriction for the
893 tgl 5501 ECB : * referencing Var. Hence, look for the restriction clause(s) that
5502 : * were applied to the referencing Var(s), and divide out their
5503 : * selectivity to correct for this.
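 *
 * E.g. (hypothetical numbers): if the derived "var = const" clause on
 * the referencing Var had selectivity s0 = 0.01, we divide fkselec by
 * 0.01 so that restriction is not counted twice.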
5504 : */
893 tgl 5505 GIC 591 : if (fkinfo->nconst_ec > 0)
5506 : {
5507 12 : for (int i = 0; i < fkinfo->nkeys; i++)
5508 : {
5509 9 : EquivalenceClass *ec = fkinfo->eclass[i];
5510 :
5511 9 : if (ec && ec->ec_has_const)
5512 : {
893 tgl 5513 CBC 3 : EquivalenceMember *em = fkinfo->fk_eclass_member[i];
5514 3 : RestrictInfo *rinfo = find_derived_clause_for_ec_member(ec,
5515 : em);
893 tgl 5516 ECB :
893 tgl 5517 GIC 3 : if (rinfo)
5518 : {
5519 : Selectivity s0;
5520 :
5521 3 : s0 = clause_selectivity(root,
5522 : (Node *) rinfo,
5523 : 0,
5524 : jointype,
5525 : sjinfo);
5526 3 : if (s0 > 0)
5527 3 : fkselec /= s0;
5528 : }
893 tgl 5529 ECB : }
5530 : }
5531 : }
5532 : }
2486 5533 :
2486 tgl 5534 GIC 79419 : *restrictlist = worklist;
893 tgl 5535 CBC 79419 : CLAMP_PROBABILITY(fkselec);
2486 tgl 5536 GIC 79419 : return fkselec;
2486 tgl 5537 ECB : }
5538 :
5539 : /*
5540 : * set_subquery_size_estimates
4524 5541 : * Set the size estimates for a base relation that is a subquery.
5542 : *
5543 : * The rel's targetlist and restrictinfo list must have been constructed
5544 : * already, and the Paths for the subquery must have been completed.
2589 5545 : * We look at the subquery's PlannerInfo to extract data.
5546 : *
5547 : * We set the same fields as set_baserel_size_estimates.
5548 : */
5549 : void
4236 tgl 5550 CBC 10277 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
4524 tgl 5551 ECB : {
4236 tgl 5552 GIC 10277 : PlannerInfo *subroot = rel->subroot;
5553 : RelOptInfo *sub_final_rel;
5554 : ListCell *lc;
5555 :
5556 : /* Should only be applied to base relations that are subqueries */
4524 5557 10277 : Assert(rel->relid > 0);
2026 andrew 5558 CBC 10277 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
4524 tgl 5559 ECB :
2589 5560 : /*
5561 : * Copy raw number of output rows from subquery. All of its paths should
5562 : * have the same output rowcount, so just look at cheapest-total.
5563 : */
2589 tgl 5564 GIC 10277 : sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
5565 10277 : rel->tuples = sub_final_rel->cheapest_total_path->rows;
5566 :
5567 : /*
5568 : * Compute per-output-column width estimates by examining the subquery's
5569 : * targetlist. For any output that is a plain Var, get the width estimate
5570 : * that was made while planning the subquery. Otherwise, we leave it to
5571 : * set_rel_width to fill in a datatype-based default estimate.
5572 : */
4524 5573 41248 : foreach(lc, subroot->parse->targetList)
4524 tgl 5574 ECB : {
2190 tgl 5575 GIC 30971 : TargetEntry *te = lfirst_node(TargetEntry, lc);
4524 tgl 5576 CBC 30971 : Node *texpr = (Node *) te->expr;
4247 tgl 5577 GIC 30971 : int32 item_width = 0;
5578 :
5579 : /* junk columns aren't visible to upper query */
4524 5580 30971 : if (te->resjunk)
4524 tgl 5581 CBC 829 : continue;
4524 tgl 5582 ECB :
5583 : /*
5584 : * The subquery could be an expansion of a view that's had columns
5585 : * added to it since the current query was parsed, so that there are
5586 : * non-junk tlist columns in it that don't correspond to any column
5587 : * visible at our query level. Ignore such columns.
3661 5588 : */
3661 tgl 5589 CBC 30142 : if (te->resno < rel->min_attr || te->resno > rel->max_attr)
3661 tgl 5590 UIC 0 : continue;
5591 :
5592 : /*
5593 : * XXX This currently doesn't work for subqueries containing set
5594 : * operations, because the Vars in their tlists are bogus references
5595 : * to the first leaf subquery, which wouldn't give the right answer
5596 : * even if we could still get to its PlannerInfo.
4247 tgl 5597 ECB : *
5598 : * Also, the subquery could be an appendrel for which all branches are
5599 : * known empty due to constraint exclusion, in which case
5600 : * set_append_rel_pathlist will have left the attr_widths set to zero.
5601 : *
5602 : * In either case, we just leave the width estimate zero until
5603 : * set_rel_width fixes it.
4524 5604 : */
4524 tgl 5605 CBC 30142 : if (IsA(texpr, Var) &&
4524 tgl 5606 GIC 12318 : subroot->parse->setOperations == NULL)
5607 : {
4382 bruce 5608 11405 : Var *var = (Var *) texpr;
4524 tgl 5609 11405 : RelOptInfo *subrel = find_base_rel(subroot, var->varno);
5610 :
5611 11405 : item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
5612 : }
4524 tgl 5613 CBC 30142 : rel->attr_widths[te->resno - rel->min_attr] = item_width;
4524 tgl 5614 EUB : }
5615 :
5616 : /* Now estimate number of output rows, etc */
4524 tgl 5617 GIC 10277 : set_baserel_size_estimates(root, rel);
5618 10277 : }
5619 :
5620 : /*
5621 : * set_function_size_estimates
5622 : * Set the size estimates for a base relation that is a function call.
5623 : *
5624 : * The rel's targetlist and restrictinfo list must have been constructed
5625 : * already.
5626 : *
5627 : * We set the same fields as set_baserel_size_estimates.
5628 : */
7637 tgl 5629 ECB : void
6517 tgl 5630 CBC 17699 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5631 : {
6395 tgl 5632 ECB : RangeTblEntry *rte;
3426 5633 : ListCell *lc;
5634 :
7637 5635 : /* Should only be applied to base relations that are functions */
7365 tgl 5636 GIC 17699 : Assert(rel->relid > 0);
5832 tgl 5637 CBC 17699 : rte = planner_rt_fetch(rel->relid, root);
6395 tgl 5638 GIC 17699 : Assert(rte->rtekind == RTE_FUNCTION);
5639 :
5640 : /*
3426 tgl 5641 ECB : * Estimate number of rows the functions will return. The rowcount of the
5642 : * node is that of the largest function result.
5643 : */
3426 tgl 5644 GIC 17699 : rel->tuples = 0;
5645 35554 : foreach(lc, rte->functions)
5646 : {
5647 17855 : RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
1520 5648 17855 : double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
5649 :
3426 5650 17855 : if (ntup > rel->tuples)
5651 17711 : rel->tuples = ntup;
5652 : }
5653 :
7034 tgl 5654 ECB : /* Now estimate number of output rows, etc */
7034 tgl 5655 GIC 17699 : set_baserel_size_estimates(root, rel);
7637 5656 17699 : }
5657 :
5658 : /*
5659 : * set_tablefunc_size_estimates
2223 alvherre 5660 ECB : * Set the size estimates for a base relation that is a table function.
5661 : *
5662 : * The rel's targetlist and restrictinfo list must have been constructed
5663 : * already.
5664 : *
5665 : * We set the same fields as set_baserel_size_estimates.
5666 : */
5667 : void
2223 alvherre 5668 CBC 108 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
2223 alvherre 5669 ECB : {
5670 : /* Should only be applied to base relations that are functions */
2223 alvherre 5671 CBC 108 : Assert(rel->relid > 0);
2026 andrew 5672 108 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
5673 :
2223 alvherre 5674 108 : rel->tuples = 100;
2223 alvherre 5675 ECB :
5676 : /* Now estimate number of output rows, etc */
2223 alvherre 5677 GIC 108 : set_baserel_size_estimates(root, rel);
5678 108 : }
2223 alvherre 5679 ECB :
6094 mail 5680 : /*
5681 : * set_values_size_estimates
5682 : * Set the size estimates for a base relation that is a values list.
5683 : *
5684 : * The rel's targetlist and restrictinfo list must have been constructed
5685 : * already.
5686 : *
5687 : * We set the same fields as set_baserel_size_estimates.
5688 : */
5689 : void
6094 mail 5690 GIC 3553 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5691 : {
6094 mail 5692 ECB : RangeTblEntry *rte;
5693 :
5694 : /* Should only be applied to base relations that are values lists */
6094 mail 5695 CBC 3553 : Assert(rel->relid > 0);
5832 tgl 5696 3553 : rte = planner_rt_fetch(rel->relid, root);
6094 mail 5697 GIC 3553 : Assert(rte->rtekind == RTE_VALUES);
6094 mail 5698 ECB :
5699 : /*
5700 : * Estimate number of rows the values list will return. We know this
6031 bruce 5701 : * precisely based on the list length (well, barring set-returning
5702 : * functions in list items, but that's a refinement not catered for
5703 : * anywhere else either).
5704 : */
6094 mail 5705 GIC 3553 : rel->tuples = list_length(rte->values_lists);
5706 :
5707 : /* Now estimate number of output rows, etc */
5708 3553 : set_baserel_size_estimates(root, rel);
5709 3553 : }
5710 :
5711 : /*
5712 : * set_cte_size_estimates
5713 : * Set the size estimates for a base relation that is a CTE reference.
5300 tgl 5714 ECB : *
5715 : * The rel's targetlist and restrictinfo list must have been constructed
5716 : * already, and we need an estimate of the number of rows returned by the CTE
5717 : * (if a regular CTE) or the non-recursive term (if a self-reference).
5718 : *
5719 : * We set the same fields as set_baserel_size_estimates.
5720 : */
5721 : void
2589 tgl 5722 GIC 1597 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
5723 : {
5724 : RangeTblEntry *rte;
5725 :
5726 : /* Should only be applied to base relations that are CTE references */
5300 5727 1597 : Assert(rel->relid > 0);
5728 1597 : rte = planner_rt_fetch(rel->relid, root);
5300 tgl 5729 CBC 1597 : Assert(rte->rtekind == RTE_CTE);
5730 :
5300 tgl 5731 GIC 1597 : if (rte->self_reference)
5300 tgl 5732 ECB : {
5733 : /*
5734 : * In a self-reference, we assume the average worktable size is a
5735 : * multiple of the nonrecursive term's size. The best multiplier will
5736 : * vary depending on query "fan-out", so make its value adjustable.
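 *
 * For example, with the default recursive_worktable_factor of 10 and
 * a nonrecursive term estimated at 50 rows, the worktable is assumed
 * to average about 500 rows across all iterations.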
5737 : */
381 tgl 5738 GIC 357 : rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
5739 : }
5740 : else
5741 : {
5742 : /* Otherwise just believe the CTE's rowcount estimate */
2589 5743 1240 : rel->tuples = cte_rows;
5744 : }
5745 :
5300 tgl 5746 ECB : /* Now estimate number of output rows, etc */
5300 tgl 5747 GIC 1597 : set_baserel_size_estimates(root, rel);
5748 1597 : }
5749 :
5750 : /*
2200 kgrittn 5751 ECB : * set_namedtuplestore_size_estimates
5752 : * Set the size estimates for a base relation that is a tuplestore reference.
5753 : *
5754 : * The rel's targetlist and restrictinfo list must have been constructed
5755 : * already.
5756 : *
5757 : * We set the same fields as set_baserel_size_estimates.
5758 : */
5759 : void
2200 kgrittn 5760 GIC 219 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5761 : {
2200 kgrittn 5762 ECB : RangeTblEntry *rte;
5763 :
5764 : /* Should only be applied to base relations that are tuplestore references */
2200 kgrittn 5765 GIC 219 : Assert(rel->relid > 0);
5766 219 : rte = planner_rt_fetch(rel->relid, root);
2200 kgrittn 5767 CBC 219 : Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
5768 :
5769 : /*
5770 : * Use the estimate provided by the code which is generating the named
2200 kgrittn 5771 ECB : * tuplestore. In some cases, the actual number might be available; in
5772 : * others the same plan will be re-used, so a "typical" value might be
5773 : * estimated and used.
5774 : */
2200 kgrittn 5775 GIC 219 : rel->tuples = rte->enrtuples;
5776 219 : if (rel->tuples < 0)
2200 kgrittn 5777 UIC 0 : rel->tuples = 1000;
5778 :
5779 : /* Now estimate number of output rows, etc */
2200 kgrittn 5780 GIC 219 : set_baserel_size_estimates(root, rel);
5781 219 : }
5782 :
5783 : /*
1532 tgl 5784 ECB : * set_result_size_estimates
5785 : * Set the size estimates for an RTE_RESULT base relation
5786 : *
5787 : * The rel's targetlist and restrictinfo list must have been constructed
5788 : * already.
5789 : *
5790 : * We set the same fields as set_baserel_size_estimates.
5791 : */
5792 : void
1532 tgl 5793 GIC 661 : set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5794 : {
5795 : /* Should only be applied to RTE_RESULT base relations */
5796 661 : Assert(rel->relid > 0);
5797 661 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
5798 :
1532 tgl 5799 ECB : /* RTE_RESULT always generates a single row, natively */
1532 tgl 5800 CBC 661 : rel->tuples = 1;
1532 tgl 5801 EUB :
5802 : /* Now estimate number of output rows, etc */
1532 tgl 5803 GIC 661 : set_baserel_size_estimates(root, rel);
1532 tgl 5804 CBC 661 : }
1532 tgl 5805 ECB :
5806 : /*
5807 : * set_foreign_size_estimates
5808 : * Set the size estimates for a base relation that is a foreign table.
5809 : *
5810 : * There is not a whole lot that we can do here; the foreign-data wrapper
5811 : * is responsible for producing useful estimates. We can do a decent job
5812 : * of estimating baserestrictcost, so we set that, and we also set up width
5813 : * using what will be purely datatype-driven estimates from the targetlist.
5814 : * There is no way to do anything sane with the rows value, so we just put
5815 : * a default estimate and hope that the wrapper can improve on it. The
5816 : * wrapper's GetForeignRelSize function will be called momentarily.
4431 5817 : *
5818 : * The rel's targetlist and restrictinfo list must have been constructed
5819 : * already.
5820 : */
5821 : void
4431 tgl 5822 GIC 1099 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5823 : {
4431 tgl 5824 ECB : /* Should only be applied to base relations */
4431 tgl 5825 GIC 1099 : Assert(rel->relid > 0);
5826 :
4431 tgl 5827 CBC 1099 : rel->rows = 1000; /* entirely bogus default estimate */
4431 tgl 5828 ECB :
4431 tgl 5829 GIC 1099 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5830 :
5831 1099 : set_rel_width(root, rel);
5832 1099 : }
5833 :
5834 :
5835 : /*
5836 : * set_rel_width
5837 : * Set the estimated output width of a base relation.
5838 : *
5839 : * The estimated output width is the sum of the per-attribute width estimates
5840 : * for the actually-referenced columns, plus any PHVs or other expressions
5841 : * that have to be calculated at this relation. This is the amount of data
5842 : * we'd need to pass upwards in case of a sort, hash, etc.
5843 : *
5844 : * This function also sets reltarget->cost, so it's a bit misnamed now.
5845 : *
7224 tgl 5846 ECB : * NB: this works best on plain relations because it prefers to look at
5847 : * real Vars. For subqueries, set_subquery_size_estimates will already have
5848 : * copied up whatever per-column estimates were made within the subquery,
4524 5849 : * and for other types of rels there isn't much we can do anyway. We fall
5850 : * back on (fairly stupid) datatype-based width estimates if we can't get
5851 : * any better number.
5852 : *
7224 5853 : * The per-attribute width estimates are cached for possible re-use while
5854 : * building join relations or post-scan/join pathtargets.
9770 scrappy 5855 : */
8491 tgl 5856 : static void
6517 tgl 5857 GIC 193492 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
5858 : {
5287 5859 193492 : Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
8005 5860 193492 : int32 tuple_width = 0;
4524 5861 193492 : bool have_wholerow_var = false;
5862 : ListCell *lc;
5863 :
5864 : /* Vars are assumed to have cost zero, but other exprs do not */
2582 5865 193492 : rel->reltarget->cost.startup = 0;
5866 193492 : rel->reltarget->cost.per_tuple = 0;
5867 :
5868 655895 : foreach(lc, rel->reltarget->exprs)
5869 : {
5283 5870 462403 : Node *node = (Node *) lfirst(lc);
5871 :
5872 : /*
5873 : * Ordinarily, a Var in a rel's targetlist must belong to that rel;
5874 : * but there are corner cases involving LATERAL references where that
5875 : * isn't so. If the Var has the wrong varno, fall through to the
5876 : * generic case (it doesn't seem worth the trouble to be any smarter).
5877 : */
3878 5878 462403 : if (IsA(node, Var) &&
5879 453477 : ((Var *) node)->varno == rel->relid)
6882 5880 126625 : {
5283 tgl 5881 CBC 453447 : Var *var = (Var *) node;
5882 : int ndx;
5283 tgl 5883 ECB : int32 item_width;
5287 5884 :
5283 tgl 5885 CBC 453447 : Assert(var->varattno >= rel->min_attr);
5283 tgl 5886 GIC 453447 : Assert(var->varattno <= rel->max_attr);
5887 :
5888 453447 : ndx = var->varattno - rel->min_attr;
9345 bruce 5889 ECB :
5283 tgl 5890 : /*
5891 : * If it's a whole-row Var, we'll deal with it below after we have
4382 bruce 5892 : * already cached as many attr widths as possible.
5893 : */
4524 tgl 5894 CBC 453447 : if (var->varattno == 0)
5895 : {
4524 tgl 5896 GIC 1228 : have_wholerow_var = true;
5897 1228 : continue;
5898 : }
5899 :
5900 : /*
5901 : * The width may have been cached already (especially if it's a
4382 bruce 5902 ECB : * subquery), so don't duplicate effort.
5283 tgl 5903 : */
5283 tgl 5904 CBC 452219 : if (rel->attr_widths[ndx] > 0)
8005 tgl 5905 ECB : {
5283 tgl 5906 GIC 98022 : tuple_width += rel->attr_widths[ndx];
7224 5907 98022 : continue;
5908 : }
5283 tgl 5909 ECB :
5910 : /* Try to get column width from statistics */
4524 tgl 5911 GIC 354197 : if (reloid != InvalidOid && var->varattno > 0)
5283 tgl 5912 ECB : {
5283 tgl 5913 GIC 271871 : item_width = get_attavgwidth(reloid, var->varattno);
5914 271871 : if (item_width > 0)
5915 : {
5916 227572 : rel->attr_widths[ndx] = item_width;
5917 227572 : tuple_width += item_width;
5283 tgl 5918 CBC 227572 : continue;
5919 : }
5283 tgl 5920 ECB : }
5921 :
5922 : /*
5923 : * Not a plain relation, or can't find statistics for it. Estimate
5924 : * using just the type info.
5925 : */
5283 tgl 5926 GIC 126625 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
5927 126625 : Assert(item_width > 0);
5283 tgl 5928 CBC 126625 : rel->attr_widths[ndx] = item_width;
5283 tgl 5929 GIC 126625 : tuple_width += item_width;
8005 tgl 5930 ECB : }
5283 tgl 5931 CBC 8956 : else if (IsA(node, PlaceHolderVar))
5932 : {
5933 : /*
5934 : * We will need to evaluate the PHV's contained expression while
2582 tgl 5935 ECB : * scanning this rel, so be sure to include it in reltarget->cost.
5936 : */
5283 tgl 5937 CBC 473 : PlaceHolderVar *phv = (PlaceHolderVar *) node;
235 tgl 5938 GNC 473 : PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
5939 : QualCost cost;
7836 bruce 5940 ECB :
5283 tgl 5941 CBC 473 : tuple_width += phinfo->ph_width;
2607 5942 473 : cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
2582 tgl 5943 GIC 473 : rel->reltarget->cost.startup += cost.startup;
5944 473 : rel->reltarget->cost.per_tuple += cost.per_tuple;
5945 : }
5946 : else
5947 : {
5948 : /*
5949 : * We could be looking at an expression pulled up from a subquery,
3260 bruce 5950 ECB : * or a ROW() representing a whole-row child Var, etc. Do what we
4790 5951 : * can using the expression type information.
5020 tgl 5952 : */
5953 : int32 item_width;
5954 : QualCost cost;
5955 :
5020 tgl 5956 GIC 8483 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
5957 8483 : Assert(item_width > 0);
5958 8483 : tuple_width += item_width;
5959 : /* Not entirely clear if we need to account for cost, but do so */
2607 5960 8483 : cost_qual_eval_node(&cost, node, root);
2582 tgl 5961 CBC 8483 : rel->reltarget->cost.startup += cost.startup;
5962 8483 : rel->reltarget->cost.per_tuple += cost.per_tuple;
5963 : }
5964 : }
4524 tgl 5965 ECB :
5966 : /*
5967 : * If we have a whole-row reference, estimate its width as the sum of
2969 5968 : * per-column widths plus heap tuple header overhead.
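 * (On a typical 64-bit build, MAXALIGN(SizeofHeapTupleHeader) is 24
 * bytes, so a whole-row Var over two int4 columns would be estimated
 * at roughly 24 + 4 + 4 = 32 bytes.)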
5969 : */
4524 tgl 5970 GIC 193492 : if (have_wholerow_var)
5971 : {
2969 5972 1228 : int32 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
5973 :
4524 5974 1228 : if (reloid != InvalidOid)
5975 : {
5976 : /* Real relation, so estimate true tuple width */
5977 1052 : wholerow_width += get_relation_data_width(reloid,
2118 5978 1052 : rel->attr_widths - rel->min_attr);
5979 : }
4524 tgl 5980 ECB : else
5981 : {
5982 : /* Do what we can with info for a phony rel */
5983 : AttrNumber i;
5984 :
4524 tgl 5985 CBC 462 : for (i = 1; i <= rel->max_attr; i++)
5986 286 : wholerow_width += rel->attr_widths[i - rel->min_attr];
5987 : }
5988 :
4524 tgl 5989 GIC 1228 : rel->attr_widths[0 - rel->min_attr] = wholerow_width;
5990 :
5991 : /*
5992 : * Include the whole-row Var as part of the output tuple. Yes, that
5993 : * really is what happens at runtime.
4524 tgl 5994 ECB : */
4524 tgl 5995 GIC 1228 : tuple_width += wholerow_width;
4524 tgl 5996 ECB : }
5997 :
8005 tgl 5998 CBC 193492 : Assert(tuple_width >= 0);
2582 tgl 5999 GIC 193492 : rel->reltarget->width = tuple_width;
9770 scrappy 6000 193492 : }
9770 scrappy 6001 ECB :
2589 tgl 6002 : /*
6003 : * set_pathtarget_cost_width
6004 : * Set the estimated eval cost and output width of a PathTarget tlist.
6005 : *
6006 : * As a notational convenience, returns the same PathTarget pointer passed in.
6007 : *
6008 : * Most, though not quite all, uses of this function occur after we've run
6009 : * set_rel_width() for base relations; so we can usually obtain cached width
6010 : * estimates for Vars. If we can't, fall back on datatype-based width
6011 : * estimates. Present early-planning uses of PathTargets don't need accurate
6012 : * widths badly enough to justify going to the catalogs for better data.
6013 : */
6014 : PathTarget *
2589 tgl 6015 GIC 256443 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6016 : {
6017 256443 : int32 tuple_width = 0;
6018 : ListCell *lc;
2589 tgl 6019 ECB :
6020 : /* Vars are assumed to have cost zero, but other exprs do not */
2589 tgl 6021 GIC 256443 : target->cost.startup = 0;
2589 tgl 6022 CBC 256443 : target->cost.per_tuple = 0;
2589 tgl 6023 ECB :
2589 tgl 6024 CBC 883541 : foreach(lc, target->exprs)
6025 : {
2589 tgl 6026 GIC 627098 : Node *node = (Node *) lfirst(lc);
6027 :
20 drowley 6028 GNC 627098 : tuple_width += get_expr_width(root, node);
6029 :
6030 : /* For non-Vars, account for evaluation cost */
6031 627098 : if (!IsA(node, Var))
6032 : {
6033 : QualCost cost;
6034 :
2589 tgl 6035 GIC 316565 : cost_qual_eval_node(&cost, node, root);
6036 316565 : target->cost.startup += cost.startup;
2589 tgl 6037 CBC 316565 : target->cost.per_tuple += cost.per_tuple;
6038 : }
6039 : }
6040 :
6041 256443 : Assert(tuple_width >= 0);
2589 tgl 6042 GIC 256443 : target->width = tuple_width;
2589 tgl 6043 ECB :
2589 tgl 6044 GIC 256443 : return target;
6045 : }
2589 tgl 6046 ECB :
6047 : /*
6048 : * get_expr_width
6049 : * Estimate the width of the given expr attempting to use the width
6050 : * Estimate the width of the given expr, attempting to use the width
6051 : * cached in a Var's owning RelOptInfo; fall back on the type's average
6052 : * width when that isn't possible, or when the given Node is not a Var.
6053 : static int32
20 drowley 6054 GNC 726524 : get_expr_width(PlannerInfo *root, const Node *expr)
6055 : {
6056 : int32 width;
6057 :
6058 726524 : if (IsA(expr, Var))
6059 : {
6060 404785 : const Var *var = (const Var *) expr;
6061 :
6062 : /* We should not see any upper-level Vars here */
6063 404785 : Assert(var->varlevelsup == 0);
6064 :
6065 : /* Try to get data from RelOptInfo cache */
6066 404785 : if (!IS_SPECIAL_VARNO(var->varno) &&
6067 402446 : var->varno < root->simple_rel_array_size)
6068 : {
6069 402446 : RelOptInfo *rel = root->simple_rel_array[var->varno];
6070 :
6071 402446 : if (rel != NULL &&
6072 391151 : var->varattno >= rel->min_attr &&
6073 391151 : var->varattno <= rel->max_attr)
6074 : {
6075 391151 : int ndx = var->varattno - rel->min_attr;
6076 :
6077 391151 : if (rel->attr_widths[ndx] > 0)
6078 379489 : return rel->attr_widths[ndx];
6079 : }
6080 : }
6081 :
6082 : /*
6083 : * No cached data available, so estimate using just the type info.
6084 : */
6085 25296 : width = get_typavgwidth(var->vartype, var->vartypmod);
6086 25296 : Assert(width > 0);
6087 :
6088 25296 : return width;
6089 : }
6090 :
6091 321739 : width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6092 321739 : Assert(width > 0);
6093 321739 : return width;
6094 : }
6095 :
6096 : /*
6097 : * relation_byte_size
8720 bruce 6098 ECB : * Estimate the storage space in bytes for a given number of tuples
6099 : * of a given width (size in bytes).
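 *
 * E.g. on a typical 64-bit build, 1000 tuples of width 50 come out to
 * 1000 * (MAXALIGN(50) + MAXALIGN(SizeofHeapTupleHeader)) =
 * 1000 * (56 + 24) = 80000 bytes.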
6100 : */
8770 tgl 6101 : static double
8491 tgl 6102 GIC 1338195 : relation_byte_size(double tuples, int width)
8770 tgl 6103 ECB : {
2969 tgl 6104 CBC 1338195 : return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
8770 tgl 6105 ECB : }
6106 :
9345 bruce 6107 : /*
6108 : * page_size
6109 : * Returns an estimate of the number of pages covered by a given
6110 : * number of tuples of a given width (size in bytes).
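 *
 * Continuing the example above, 80000 bytes with the default 8kB
 * BLCKSZ gives ceil(80000 / 8192) = 10 pages.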
6111 : */
6112 : static double
8491 tgl 6113 GIC 4322 : page_size(double tuples, int width)
6114 : {
6115 4322 : return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6116 : }
2277 rhaas 6117 ECB :
6118 : /*
6119 : * Estimate the fraction of the work that each worker will do given the
6120 : * number of workers budgeted for the path.
6121 : */
6122 : static double
2277 rhaas 6123 CBC 64881 : get_parallel_divisor(Path *path)
2277 rhaas 6124 ECB : {
2277 rhaas 6125 CBC 64881 : double parallel_divisor = path->parallel_workers;
6126 :
6127 : /*
6128 : * Early experience with parallel query suggests that when there is only
6129 : * one worker, the leader often makes a very substantial contribution to
6130 : * executing the parallel portion of the plan, but as more workers are
6131 : * added, it does less and less, because it's busy reading tuples from the
6132 : * workers and doing whatever non-parallel post-processing is needed. By
6133 : * the time we reach 4 workers, the leader no longer makes a meaningful
2277 rhaas 6134 ECB : * contribution. Thus, for now, estimate that the leader spends 30% of
6135 : * its time servicing each worker, and the remainder executing the
6136 : * parallel plan.
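 *
 * For example, with 2 workers and parallel_leader_participation on,
 * the divisor is 2 + (1.0 - 0.3 * 2) = 2.4; with 4 or more workers
 * the leader contribution drops out entirely.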
6137 : */
1971 rhaas 6138 GIC 64881 : if (parallel_leader_participation)
6139 : {
6140 : double leader_contribution;
6141 :
6142 64497 : leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6143 64497 : if (leader_contribution > 0)
6144 64041 : parallel_divisor += leader_contribution;
1971 rhaas 6145 ECB : }
6146 :
2277 rhaas 6147 CBC 64881 : return parallel_divisor;
6148 : }
6149 :
6150 : /*
6151 : * compute_bitmap_pages
6152 : *
6153 : * Compute the number of heap pages fetched by a bitmap heap scan.
6154 : */
2263 rhaas 6155 ECB : double
2263 rhaas 6156 GIC 233698 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
2263 rhaas 6157 ECB : int loop_count, Cost *cost, double *tuple)
6158 : {
6159 : Cost indexTotalCost;
6160 : Selectivity indexSelectivity;
6161 : double T;
6162 : double pages_fetched;
6163 : double tuples_fetched;
6164 : double heap_pages;
6165 : long maxentries;
6166 :
6167 : /*
6168 : * Fetch total cost of obtaining the bitmap, as well as its total
6169 : * selectivity.
6170 : */
2263 rhaas 6171 GIC 233698 : cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6172 :
6173 : /*
2263 rhaas 6174 ECB : * Estimate number of main-table pages fetched.
6175 : */
2263 rhaas 6176 CBC 233698 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6177 :
2263 rhaas 6178 GIC 233698 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
2263 rhaas 6179 ECB :
6180 : /*
6181 : * For a single scan, the number of heap pages that need to be fetched is
6182 : * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6183 : * re-reads needed).
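 *
 * E.g. with T = 1000 heap pages and tuples_fetched = 500, this gives
 * (2 * 1000 * 500) / (2 * 1000 + 500) = 400 pages.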
6184 : */
1976 rhaas 6185 GIC 233698 : pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
6186 :
6187 : /*
1976 rhaas 6188 ECB : * Calculate the number of pages fetched from the heap. Then based on
6189 : * current work_mem estimate get the estimated maxentries in the bitmap.
6190 : * (Note that we always do this calculation based on the number of pages
6191 : * that would be fetched in a single iteration, even if loop_count > 1.
6192 : * That's correct, because only that number of entries will be stored in
6193 : * the bitmap at one time.)
6194 : */
1976 rhaas 6195 GIC 233698 : heap_pages = Min(pages_fetched, baserel->pages);
6196 233698 : maxentries = tbm_calculate_entries(work_mem * 1024L);
6197 :
2263 6198 233698 : if (loop_count > 1)
6199 : {
6200 : /*
6201 : * For repeated bitmap scans, scale up the number of tuples fetched in
6202 : * the Mackert and Lohman formula by the number of scans, so that we
2263 rhaas 6203 ECB : * estimate the number of pages fetched by all the scans. Then
6204 : * pro-rate for one scan.
6205 : */
2263 rhaas 6206 GIC 47275 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6207 : baserel->pages,
2263 rhaas 6208 ECB : get_indexpath_pages(bitmapqual),
6209 : root);
2263 rhaas 6210 CBC 47275 : pages_fetched /= loop_count;
6211 : }
6212 :
2263 rhaas 6213 GIC 233698 : if (pages_fetched >= T)
6214 22028 : pages_fetched = T;
6215 : else
6216 211670 : pages_fetched = ceil(pages_fetched);
2263 rhaas 6217 ECB :
1976 rhaas 6218 GIC 233698 : if (maxentries < heap_pages)
6219 : {
6220 : double exact_pages;
6221 : double lossy_pages;
6222 :
6223 : /*
6224 : * Crude approximation of the number of lossy pages. Because of the
6225 : * way tbm_lossify() is coded, the number of lossy pages increases
6226 : * very sharply as soon as we run short of memory; this formula has
1976 rhaas 6227 ECB : * that property and seems to perform adequately in testing, but it's
6228 : * possible we could do better somehow.
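 *
 * E.g. (hypothetical numbers): with heap_pages = 1000 but maxentries =
 * 1600, we guess lossy_pages = 1000 - 1600/2 = 200, leaving 800 exact
 * pages.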
6229 : */
1976 rhaas 6230 CBC 9 : lossy_pages = Max(0, heap_pages - maxentries / 2);
1976 rhaas 6231 GIC 9 : exact_pages = heap_pages - lossy_pages;
6232 :
6233 : /*
6234 : * If there are lossy pages then recompute the number of tuples
6235 : * processed by the bitmap heap node. We assume here that the chance
6236 : * of a given tuple coming from an exact page is the same as the
6237 : * chance that a given page is exact. This might not be true, but
1976 rhaas 6238 ECB : * it's not clear how we can do any better.
6239 : */
1976 rhaas 6240 GIC 9 : if (lossy_pages > 0)
6241 : tuples_fetched =
1976 rhaas 6242 CBC 9 : clamp_row_est(indexSelectivity *
1976 rhaas 6243 GIC 9 : (exact_pages / heap_pages) * baserel->tuples +
6244 9 : (lossy_pages / heap_pages) * baserel->tuples);
1976 rhaas 6245 ECB : }
6246 :
2263 rhaas 6247 GIC 233698 : if (cost)
2263 rhaas 6248 CBC 182089 : *cost = indexTotalCost;
2263 rhaas 6249 GIC 233698 : if (tuple)
2263 rhaas 6250 CBC 182089 : *tuple = tuples_fetched;
6251 :
2263 rhaas 6252 GIC 233698 : return pages_fetched;
6253 : }