/*-------------------------------------------------------------------------
 *
 * costsize.c
 *	  Routines to compute (and set) relation sizes and path costs
 *
 * Path costs are measured in arbitrary units established by these basic
 * parameters:
 *
 *	seq_page_cost		Cost of a sequential page fetch
 *	random_page_cost	Cost of a non-sequential page fetch
 *	cpu_tuple_cost		Cost of typical CPU time to process a tuple
 *	cpu_index_tuple_cost	Cost of typical CPU time to process an index tuple
 *	cpu_operator_cost	Cost of CPU time to execute an operator or function
 *	parallel_tuple_cost	Cost of CPU time to pass a tuple from worker to leader backend
 *	parallel_setup_cost	Cost of setting up shared memory for parallelism
 *
 * We expect that the kernel will typically do some amount of read-ahead
 * optimization; this in conjunction with seek costs means that seq_page_cost
 * is normally considerably less than random_page_cost.  (However, if the
 * database is fully cached in RAM, it is reasonable to set them equal.)
 *
 * We also use a rough estimate "effective_cache_size" of the number of
 * disk pages in Postgres + OS-level disk cache.  (We can't simply use
 * NBuffers for this purpose because that would ignore the effects of
 * the kernel's disk cache.)
 *
 * Obviously, taking constants for these values is an oversimplification,
 * but it's tough enough to get any useful estimates even at this level of
 * detail.  Note that all of these parameters are user-settable, in case
 * the default values are drastically off for a particular platform.
 *
 * seq_page_cost and random_page_cost can also be overridden for an individual
 * tablespace, in case some data is on a fast disk and other data is on a slow
 * disk.  Per-tablespace overrides never apply to temporary work files such as
 * an external sort or a materialize node that overflows work_mem.
 *
 * We compute two separate costs for each path:
 *		total_cost: total estimated cost to fetch all tuples
 *		startup_cost: cost that is expended before first tuple is fetched
 * In some scenarios, such as when there is a LIMIT or we are implementing
 * an EXISTS(...) sub-select, it is not necessary to fetch all tuples of the
 * path's result.  A caller can estimate the cost of fetching a partial
 * result by interpolating between startup_cost and total_cost.  In detail:
 *		actual_cost = startup_cost +
 *			(total_cost - startup_cost) * tuples_to_fetch / path->rows;
 * Note that a base relation's rows count (and, by extension, plan_rows for
 * plan nodes below the LIMIT node) are set without regard to any LIMIT, so
 * that this equation works properly.  (Note: while path->rows is never zero
 * for ordinary relations, it is zero for paths for provably-empty relations,
 * so beware of division-by-zero.)  The LIMIT is applied as a top-level
 * plan node.
 *
 * For largely historical reasons, most of the routines in this module use
 * the passed result Path only to store their results (rows, startup_cost and
 * total_cost) into.  All the input data they need is passed as separate
 * parameters, even though much of it could be extracted from the Path.
 * An exception is made for the cost_XXXjoin() routines, which expect all
 * the other fields of the passed XXXPath to be filled in, and similarly
 * cost_index() assumes the passed IndexPath is valid except for its output
 * values.
 *
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/optimizer/path/costsize.c
 *
 *-------------------------------------------------------------------------
 */
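
/*
 * To make the interpolation rule above concrete, a worked example with
 * made-up numbers: for a path with startup_cost = 10, total_cost = 110
 * and rows = 1000, fetching just the first 100 tuples is estimated as
 *		10 + (110 - 10) * 100 / 1000 = 20
 * i.e. the startup cost plus one tenth of the run cost.
 */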

#include "postgres.h"

#include <limits.h>
#include <math.h>

#include "access/amapi.h"
#include "access/htup_details.h"
#include "access/tsmapi.h"
#include "executor/executor.h"
#include "executor/nodeAgg.h"
#include "executor/nodeHash.h"
#include "executor/nodeMemoize.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/placeholder.h"
#include "optimizer/plancat.h"
#include "optimizer/restrictinfo.h"
#include "parser/parsetree.h"
#include "utils/lsyscache.h"
#include "utils/selfuncs.h"
#include "utils/spccache.h"
#include "utils/tuplesort.h"


#define LOG2(x)  (log(x) / 0.693147180559945)

/*
 * Append and MergeAppend nodes are less expensive than some other operations
 * which use cpu_tuple_cost; instead of adding a separate GUC, estimate the
 * per-tuple cost as cpu_tuple_cost multiplied by this value.
 */
#define APPEND_CPU_COST_MULTIPLIER 0.5

/*
 * Maximum value for row estimates.  We cap row estimates to this to help
 * ensure that costs based on these estimates remain within the range of what
 * double can represent.  add_path() wouldn't act sanely given infinite or NaN
 * cost values.
 */
#define MAXIMUM_ROWCOUNT 1e100

double		seq_page_cost = DEFAULT_SEQ_PAGE_COST;
double		random_page_cost = DEFAULT_RANDOM_PAGE_COST;
double		cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
double		cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double		cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
double		parallel_tuple_cost = DEFAULT_PARALLEL_TUPLE_COST;
double		parallel_setup_cost = DEFAULT_PARALLEL_SETUP_COST;
double		recursive_worktable_factor = DEFAULT_RECURSIVE_WORKTABLE_FACTOR;

int			effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;

Cost		disable_cost = 1.0e10;

int			max_parallel_workers_per_gather = 2;

bool		enable_seqscan = true;
bool		enable_indexscan = true;
bool		enable_indexonlyscan = true;
bool		enable_bitmapscan = true;
bool		enable_tidscan = true;
bool		enable_sort = true;
bool		enable_incremental_sort = true;
bool		enable_hashagg = true;
bool		enable_nestloop = true;
bool		enable_material = true;
bool		enable_memoize = true;
bool		enable_mergejoin = true;
bool		enable_hashjoin = true;
bool		enable_gathermerge = true;
bool		enable_partitionwise_join = false;
bool		enable_partitionwise_aggregate = false;
bool		enable_parallel_append = true;
bool		enable_parallel_hash = true;
bool		enable_partition_pruning = true;
bool		enable_presorted_aggregate = true;
bool		enable_async_append = true;

typedef struct
{
	PlannerInfo *root;
	QualCost	total;
} cost_qual_eval_context;

static List *extract_nonindex_conditions(List *qual_clauses, List *indexclauses);
static MergeScanSelCache *cached_scansel(PlannerInfo *root,
										 RestrictInfo *rinfo,
										 PathKey *pathkey);
static void cost_rescan(PlannerInfo *root, Path *path,
						Cost *rescan_startup_cost, Cost *rescan_total_cost);
static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
									  ParamPathInfo *param_info,
									  QualCost *qpqual_cost);
static bool has_indexed_join_quals(NestPath *path);
static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
								 List *quals);
static double calc_joinrel_size_estimate(PlannerInfo *root,
										 RelOptInfo *joinrel,
										 RelOptInfo *outer_rel,
										 RelOptInfo *inner_rel,
										 double outer_rows,
										 double inner_rows,
										 SpecialJoinInfo *sjinfo,
										 List *restrictlist);
static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
													Relids outer_relids,
													Relids inner_relids,
													SpecialJoinInfo *sjinfo,
													List **restrictlist);
static Cost append_nonpartial_cost(List *subpaths, int numpaths,
								   int parallel_workers);
static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
static int32 get_expr_width(PlannerInfo *root, const Node *expr);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
static double get_parallel_divisor(Path *path);

/*
 * clamp_row_est
 *		Force a row-count estimate to a sane value.
 */
double
clamp_row_est(double nrows)
{
	/*
	 * Avoid infinite and NaN row estimates.  Costs derived from such values
	 * are going to be useless.  Also force the estimate to be at least one
	 * row, to make explain output look better and to avoid possible
	 * divide-by-zero when interpolating costs.  Make it an integer, too.
	 */
	if (nrows > MAXIMUM_ROWCOUNT || isnan(nrows))
		nrows = MAXIMUM_ROWCOUNT;
	else if (nrows <= 1.0)
		nrows = 1.0;
	else
		nrows = rint(nrows);

	return nrows;
}
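
/*
 * A minimal sketch of the clamping behavior (the COSTSIZE_EXAMPLES guard
 * is hypothetical, used only to keep illustrative code out of the build):
 */
#ifdef COSTSIZE_EXAMPLES
static void
clamp_row_est_examples(void)
{
	Assert(clamp_row_est(-5.0) == 1.0); /* negatives forced up to one row */
	Assert(clamp_row_est(0.3) == 1.0);	/* tiny fractions likewise */
	Assert(clamp_row_est(123.6) == 124.0);	/* rounded to an integer */
	Assert(clamp_row_est(NAN) == MAXIMUM_ROWCOUNT); /* NaN made finite */
}
#endif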

/*
 * clamp_width_est
 *		Force a tuple-width estimate to a sane value.
 *
 * The planner represents datatype width and tuple width estimates as int32.
 * When summing column width estimates to create a tuple width estimate,
 * it's possible to reach integer overflow in edge cases.  To ensure sane
 * behavior, we form such sums in int64 arithmetic and then apply this routine
 * to clamp to int32 range.
 */
int32
clamp_width_est(int64 tuple_width)
{
	/*
	 * Anything more than MaxAllocSize is clearly bogus, since we could not
	 * create a tuple that large.
	 */
	if (tuple_width > MaxAllocSize)
		return (int32) MaxAllocSize;

	/*
	 * Unlike clamp_row_est, we just Assert that the value isn't negative,
	 * rather than masking such errors.
	 */
	Assert(tuple_width >= 0);

	return (int32) tuple_width;
}
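
/*
 * A minimal sketch of the intended calling pattern (sum_column_widths and
 * the COSTSIZE_EXAMPLES guard are hypothetical): accumulate in int64 so
 * the additions themselves cannot overflow, then clamp once at the end.
 */
#ifdef COSTSIZE_EXAMPLES
static int32
sum_column_widths(const int32 *widths, int ncols)
{
	int64		tuple_width = 0;

	for (int i = 0; i < ncols; i++)
		tuple_width += widths[i];	/* int64 arithmetic, cannot overflow */

	return clamp_width_est(tuple_width);	/* clamp back to int32 range */
}
#endif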

/*
 * clamp_cardinality_to_long
 *		Cast a Cardinality value to a sane long value.
 */
long
clamp_cardinality_to_long(Cardinality x)
{
	/*
	 * Just for paranoia's sake, ensure we do something sane with negative or
	 * NaN values.
	 */
	if (isnan(x))
		return LONG_MAX;
	if (x <= 0)
		return 0;

	/*
	 * If "long" is 64 bits, then LONG_MAX cannot be represented exactly as a
	 * double.  Casting it to double and back may well result in overflow due
	 * to rounding, so avoid doing that.  We trust that any double value that
	 * compares strictly less than "(double) LONG_MAX" will cast to a
	 * representable "long" value.
	 */
	return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
}
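
/*
 * The hazard the strict comparison above avoids, with concrete values for
 * a 64-bit "long": LONG_MAX is 2^63 - 1, which rounds up to exactly 2^63
 * when converted to double, so "(long) (double) LONG_MAX" would overflow.
 * Accepting only values strictly below (double) LONG_MAX guarantees the
 * cast result fits.
 */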


/*
 * cost_seqscan
 *	  Determines and returns the cost of scanning a relation sequentially.
 *
 * 'baserel' is the relation to be scanned
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 */
void
cost_seqscan(Path *path, PlannerInfo *root,
			 RelOptInfo *baserel, ParamPathInfo *param_info)
{
	Cost		startup_cost = 0;
	Cost		cpu_run_cost;
	Cost		disk_run_cost;
	double		spc_seq_page_cost;
	QualCost	qpqual_cost;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations */
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	/* Mark the path with the correct row estimate */
	if (param_info)
		path->rows = param_info->ppi_rows;
	else
		path->rows = baserel->rows;

	if (!enable_seqscan)
		startup_cost += disable_cost;

	/* fetch estimated page cost for tablespace containing table */
	get_tablespace_page_costs(baserel->reltablespace,
							  NULL,
							  &spc_seq_page_cost);

	/*
	 * disk costs
	 */
	disk_run_cost = spc_seq_page_cost * baserel->pages;

	/* CPU costs */
	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

	startup_cost += qpqual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
	cpu_run_cost = cpu_per_tuple * baserel->tuples;
	/* tlist eval costs are paid per output row, not per tuple scanned */
	startup_cost += path->pathtarget->cost.startup;
	cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;

	/* Adjust costing for parallelism, if used. */
	if (path->parallel_workers > 0)
	{
		double		parallel_divisor = get_parallel_divisor(path);

		/* The CPU cost is divided among all the workers. */
		cpu_run_cost /= parallel_divisor;

		/*
		 * It may be possible to amortize some of the I/O cost, but probably
		 * not very much, because most operating systems already do aggressive
		 * prefetching.  For now, we assume that the disk run cost can't be
		 * amortized at all.
		 */

		/*
		 * In the case of a parallel plan, the row count needs to represent
		 * the number of tuples processed per worker.
		 */
		path->rows = clamp_row_est(path->rows / parallel_divisor);
	}

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
}
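
/*
 * Condensed, the non-parallel sequential-scan arithmetic above (a sketch
 * that ignores disable_cost, per-tablespace page costs, parallelism and
 * tlist costs; the COSTSIZE_EXAMPLES guard is hypothetical):
 */
#ifdef COSTSIZE_EXAMPLES
static Cost
seqscan_cost_sketch(double pages, double tuples, QualCost qpquals)
{
	Cost		disk_run_cost = seq_page_cost * pages;
	Cost		cpu_run_cost = (cpu_tuple_cost + qpquals.per_tuple) * tuples;

	return qpquals.startup + disk_run_cost + cpu_run_cost;
}
#endif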

/*
 * cost_samplescan
 *	  Determines and returns the cost of scanning a relation using sampling.
 *
 * 'baserel' is the relation to be scanned
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 */
void
cost_samplescan(Path *path, PlannerInfo *root,
				RelOptInfo *baserel, ParamPathInfo *param_info)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	RangeTblEntry *rte;
	TableSampleClause *tsc;
	TsmRoutine *tsm;
	double		spc_seq_page_cost,
				spc_random_page_cost,
				spc_page_cost;
	QualCost	qpqual_cost;
	Cost		cpu_per_tuple;

	/* Should only be applied to base relations with tablesample clauses */
	Assert(baserel->relid > 0);
	rte = planner_rt_fetch(baserel->relid, root);
	Assert(rte->rtekind == RTE_RELATION);
	tsc = rte->tablesample;
	Assert(tsc != NULL);
	tsm = GetTsmRoutine(tsc->tsmhandler);

	/* Mark the path with the correct row estimate */
	if (param_info)
		path->rows = param_info->ppi_rows;
	else
		path->rows = baserel->rows;

	/* fetch estimated page cost for tablespace containing table */
	get_tablespace_page_costs(baserel->reltablespace,
							  &spc_random_page_cost,
							  &spc_seq_page_cost);

	/* if NextSampleBlock is used, assume random access, else sequential */
	spc_page_cost = (tsm->NextSampleBlock != NULL) ?
		spc_random_page_cost : spc_seq_page_cost;

	/*
	 * disk costs (recall that baserel->pages has already been set to the
	 * number of pages the sampling method will visit)
	 */
	run_cost += spc_page_cost * baserel->pages;

	/*
	 * CPU costs (recall that baserel->tuples has already been set to the
	 * number of tuples the sampling method will select).  Note that we ignore
	 * execution cost of the TABLESAMPLE parameter expressions; they will be
	 * evaluated only once per scan, and in most usages they'll likely be
	 * simple constants anyway.  We also don't charge anything for the
	 * calculations the sampling method might do internally.
	 */
	get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

	startup_cost += qpqual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
	run_cost += cpu_per_tuple * baserel->tuples;
	/* tlist eval costs are paid per output row, not per tuple scanned */
	startup_cost += path->pathtarget->cost.startup;
	run_cost += path->pathtarget->cost.per_tuple * path->rows;

	path->startup_cost = startup_cost;
	path->total_cost = startup_cost + run_cost;
}

/*
 * cost_gather
 *	  Determines and returns the cost of gather path.
 *
 * 'rel' is the relation to be operated upon
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 * 'rows' may be used to point to a row estimate; if non-NULL, it overrides
 * both 'rel' and 'param_info'.  This is useful when the path doesn't exactly
 * correspond to any particular RelOptInfo.
 */
void
cost_gather(GatherPath *path, PlannerInfo *root,
			RelOptInfo *rel, ParamPathInfo *param_info,
			double *rows)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;

	/* Mark the path with the correct row estimate */
	if (rows)
		path->path.rows = *rows;
	else if (param_info)
		path->path.rows = param_info->ppi_rows;
	else
		path->path.rows = rel->rows;

	startup_cost = path->subpath->startup_cost;

	run_cost = path->subpath->total_cost - path->subpath->startup_cost;

	/* Parallel setup and communication cost. */
	startup_cost += parallel_setup_cost;
	run_cost += parallel_tuple_cost * path->path.rows;

	path->path.startup_cost = startup_cost;
	path->path.total_cost = (startup_cost + run_cost);
}
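
/*
 * In effect, Gather adds a flat setup charge plus a per-row transfer charge
 * on top of its subpath.  With the default parallel_setup_cost = 1000 and
 * parallel_tuple_cost = 0.1, a subpath returning 10000 rows (made-up count)
 * pays 1000 + 0.1 * 10000 = 2000 over and above the subpath's own cost.
 */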

/*
 * cost_gather_merge
 *	  Determines and returns the cost of gather merge path.
 *
 * GatherMerge merges several pre-sorted input streams, using a heap that at
 * any given instant holds the next tuple from each stream.  If there are N
 * streams, we need about N*log2(N) tuple comparisons to construct the heap at
 * startup, and then for each output tuple, about log2(N) comparisons to
 * replace the top heap entry with the next tuple from the same stream.
 */
void
cost_gather_merge(GatherMergePath *path, PlannerInfo *root,
				  RelOptInfo *rel, ParamPathInfo *param_info,
				  Cost input_startup_cost, Cost input_total_cost,
				  double *rows)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		comparison_cost;
	double		N;
	double		logN;

	/* Mark the path with the correct row estimate */
	if (rows)
		path->path.rows = *rows;
	else if (param_info)
		path->path.rows = param_info->ppi_rows;
	else
		path->path.rows = rel->rows;

	if (!enable_gathermerge)
		startup_cost += disable_cost;

	/*
	 * Add one to the number of workers to account for the leader.  This might
	 * be overgenerous since the leader will do less work than other workers
	 * in typical cases, but we'll go with it for now.
	 */
	Assert(path->num_workers > 0);
	N = (double) path->num_workers + 1;
	logN = LOG2(N);

	/* Assumed cost per tuple comparison */
	comparison_cost = 2.0 * cpu_operator_cost;

	/* Heap creation cost */
	startup_cost += comparison_cost * N * logN;

	/* Per-tuple heap maintenance cost */
	run_cost += path->path.rows * comparison_cost * logN;

	/* small cost for heap management, like cost_merge_append */
	run_cost += cpu_operator_cost * path->path.rows;

	/*
	 * Parallel setup and communication cost.  Since Gather Merge, unlike
	 * Gather, requires us to block until a tuple is available from every
	 * worker, we bump the IPC cost up a little bit as compared with Gather.
	 * For lack of a better idea, charge an extra 5%.
	 */
	startup_cost += parallel_setup_cost;
	run_cost += parallel_tuple_cost * path->path.rows * 1.05;

	path->path.startup_cost = startup_cost + input_startup_cost;
	path->path.total_cost = (startup_cost + run_cost + input_total_cost);
}
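
/*
 * Worked example of the heap arithmetic above, with made-up numbers: for
 * num_workers = 3 we get N = 4 and logN = 2, so heap creation charges
 * 2.0 * cpu_operator_cost * 4 * 2 at startup, and each output row then
 * pays 2.0 * cpu_operator_cost * 2 for the heap replacement plus
 * cpu_operator_cost for heap management, on top of the 5%-inflated
 * per-tuple transfer cost.
 */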

/*
 * cost_index
 *	  Determines and returns the cost of scanning a relation using an index.
 *
 * 'path' describes the indexscan under consideration, and is complete
 *		except for the fields to be set by this routine
 * 'loop_count' is the number of repetitions of the indexscan to factor into
 *		estimates of caching behavior
 *
 * In addition to rows, startup_cost and total_cost, cost_index() sets the
 * path's indextotalcost and indexselectivity fields.  These values will be
 * needed if the IndexPath is used in a BitmapIndexScan.
 *
 * NOTE: path->indexquals must contain only clauses usable as index
 * restrictions.  Any additional quals evaluated as qpquals may reduce the
 * number of returned tuples, but they won't reduce the number of tuples
 * we have to fetch from the table, so they don't reduce the scan cost.
 */
void
cost_index(IndexPath *path, PlannerInfo *root, double loop_count,
		   bool partial_path)
{
	IndexOptInfo *index = path->indexinfo;
	RelOptInfo *baserel = index->rel;
	bool		indexonly = (path->path.pathtype == T_IndexOnlyScan);
	amcostestimate_function amcostestimate;
	List	   *qpquals;
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	Cost		cpu_run_cost = 0;
	Cost		indexStartupCost;
	Cost		indexTotalCost;
	Selectivity indexSelectivity;
	double		indexCorrelation,
				csquared;
	double		spc_seq_page_cost,
				spc_random_page_cost;
	Cost		min_IO_cost,
				max_IO_cost;
	QualCost	qpqual_cost;
	Cost		cpu_per_tuple;
	double		tuples_fetched;
	double		pages_fetched;
	double		rand_heap_pages;
	double		index_pages;

	/* Should only be applied to base relations */
	Assert(IsA(baserel, RelOptInfo) &&
		   IsA(index, IndexOptInfo));
	Assert(baserel->relid > 0);
	Assert(baserel->rtekind == RTE_RELATION);

	/*
	 * Mark the path with the correct row estimate, and identify which quals
	 * will need to be enforced as qpquals.  We need not check any quals that
	 * are implied by the index's predicate, so we can use indrestrictinfo not
	 * baserestrictinfo as the list of relevant restriction clauses for the
	 * rel.
	 */
	if (path->path.param_info)
	{
		path->path.rows = path->path.param_info->ppi_rows;
		/* qpquals come from the rel's restriction clauses and ppi_clauses */
		qpquals = list_concat(extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
														  path->indexclauses),
							  extract_nonindex_conditions(path->path.param_info->ppi_clauses,
														  path->indexclauses));
	}
	else
	{
		path->path.rows = baserel->rows;
		/* qpquals come from just the rel's restriction clauses */
		qpquals = extract_nonindex_conditions(path->indexinfo->indrestrictinfo,
											  path->indexclauses);
	}

	if (!enable_indexscan)
		startup_cost += disable_cost;
	/* we don't need to check enable_indexonlyscan; indxpath.c does that */

	/*
	 * Call index-access-method-specific code to estimate the processing cost
	 * for scanning the index, as well as the selectivity of the index (ie,
	 * the fraction of main-table tuples we will have to retrieve) and its
	 * correlation to the main-table tuple order.  We need a cast here because
	 * pathnodes.h uses a weak function type to avoid including amapi.h.
	 */
	amcostestimate = (amcostestimate_function) index->amcostestimate;
	amcostestimate(root, path, loop_count,
				   &indexStartupCost, &indexTotalCost,
				   &indexSelectivity, &indexCorrelation,
				   &index_pages);

	/*
	 * Save amcostestimate's results for possible use in bitmap scan planning.
	 * We don't bother to save indexStartupCost or indexCorrelation, because a
	 * bitmap scan doesn't care about either.
	 */
	path->indextotalcost = indexTotalCost;
	path->indexselectivity = indexSelectivity;

	/* all costs for touching index itself included here */
	startup_cost += indexStartupCost;
	run_cost += indexTotalCost - indexStartupCost;

	/* estimate number of main-table tuples fetched */
	tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

	/* fetch estimated page costs for tablespace containing table */
	get_tablespace_page_costs(baserel->reltablespace,
							  &spc_random_page_cost,
							  &spc_seq_page_cost);

	/*----------
	 * Estimate number of main-table pages fetched, and compute I/O cost.
	 *
	 * When the index ordering is uncorrelated with the table ordering,
	 * we use an approximation proposed by Mackert and Lohman (see
	 * index_pages_fetched() for details) to compute the number of pages
	 * fetched, and then charge spc_random_page_cost per page fetched.
	 *
	 * When the index ordering is exactly correlated with the table ordering
	 * (just after a CLUSTER, for example), the number of pages fetched should
	 * be exactly selectivity * table_size.  What's more, all but the first
	 * will be sequential fetches, not the random fetches that occur in the
	 * uncorrelated case.  So if the number of pages is more than 1, we
	 * ought to charge
	 *		spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
	 * For partially-correlated indexes, we ought to charge somewhere between
	 * these two estimates.  We currently interpolate linearly between the
	 * estimates based on the correlation squared (XXX is that appropriate?).
	 *
	 * If it's an index-only scan, then we will not need to fetch any heap
	 * pages for which the visibility map shows all tuples are visible.
	 * Hence, reduce the estimated number of heap fetches accordingly.
	 * We use the measured fraction of the entire heap that is all-visible,
	 * which might not be particularly relevant to the subset of the heap
	 * that this query will fetch; but it's not clear how to do better.
	 *----------
	 */
	if (loop_count > 1)
	{
		/*
		 * For repeated indexscans, the appropriate estimate for the
		 * uncorrelated case is to scale up the number of tuples fetched in
		 * the Mackert and Lohman formula by the number of scans, so that we
		 * estimate the number of pages fetched by all the scans; then
		 * pro-rate the costs for one scan.  In this case we assume all the
		 * fetches are random accesses.
		 */
		pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
											baserel->pages,
											(double) index->pages,
											root);

		if (indexonly)
			pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

		rand_heap_pages = pages_fetched;

		max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;

		/*
		 * In the perfectly correlated case, the number of pages touched by
		 * each scan is selectivity * table_size, and we can use the Mackert
		 * and Lohman formula at the page level to estimate how much work is
		 * saved by caching across scans.  We still assume all the fetches are
		 * random, though, which is an overestimate that's hard to correct for
		 * without double-counting the cache effects.  (But in most cases
		 * where such a plan is actually interesting, only one page would get
		 * fetched per scan anyway, so it shouldn't matter much.)
		 */
		pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

		pages_fetched = index_pages_fetched(pages_fetched * loop_count,
											baserel->pages,
											(double) index->pages,
											root);

		if (indexonly)
			pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

		min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
	}
	else
	{
		/*
		 * Normal case: apply the Mackert and Lohman formula, and then
		 * interpolate between that and the correlation-derived result.
		 */
		pages_fetched = index_pages_fetched(tuples_fetched,
											baserel->pages,
											(double) index->pages,
											root);

		if (indexonly)
			pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

		rand_heap_pages = pages_fetched;

		/* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
		max_IO_cost = pages_fetched * spc_random_page_cost;

		/* min_IO_cost is for the perfectly correlated case (csquared=1) */
		pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

		if (indexonly)
			pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

		if (pages_fetched > 0)
		{
			min_IO_cost = spc_random_page_cost;
			if (pages_fetched > 1)
				min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
		}
		else
			min_IO_cost = 0;
	}

	if (partial_path)
	{
		/*
		 * For index only scans compute workers based on number of index pages
		 * fetched; the number of heap pages we fetch might be so small as to
		 * effectively rule out parallelism, which we don't want to do.
		 */
		if (indexonly)
			rand_heap_pages = -1;

		/*
		 * Estimate the number of parallel workers required to scan index. Use
		 * the number of heap pages computed considering heap fetches won't be
		 * sequential as for parallel scans the pages are accessed in random
		 * order.
		 */
		path->path.parallel_workers = compute_parallel_worker(baserel,
															  rand_heap_pages,
															  index_pages,
															  max_parallel_workers_per_gather);

		/*
		 * Fall out if workers can't be assigned for parallel scan, because in
		 * such a case this path will be rejected.  So there is no benefit in
		 * doing extra computation.
		 */
		if (path->path.parallel_workers <= 0)
			return;

		path->path.parallel_aware = true;
	}

	/*
	 * Now interpolate based on estimated index order correlation to get total
	 * disk I/O cost for main table accesses.
	 */
	csquared = indexCorrelation * indexCorrelation;

	run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);

	/*
	 * Estimate CPU costs per tuple.
	 *
	 * What we want here is cpu_tuple_cost plus the evaluation costs of any
	 * qual clauses that we have to evaluate as qpquals.
	 */
	cost_qual_eval(&qpqual_cost, qpquals, root);

	startup_cost += qpqual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;

	cpu_run_cost += cpu_per_tuple * tuples_fetched;

	/* tlist eval costs are paid per output row, not per tuple scanned */
	startup_cost += path->path.pathtarget->cost.startup;
	cpu_run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;

	/* Adjust costing for parallelism, if used. */
	if (path->path.parallel_workers > 0)
	{
		double		parallel_divisor = get_parallel_divisor(&path->path);

		path->path.rows = clamp_row_est(path->path.rows / parallel_divisor);

		/* The CPU cost is divided among all the workers. */
		cpu_run_cost /= parallel_divisor;
	}

	run_cost += cpu_run_cost;

	path->path.startup_cost = startup_cost;
	path->path.total_cost = startup_cost + run_cost;
}
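
/*
 * A sketch of the I/O interpolation above with made-up numbers: given
 * max_IO_cost = 400 (uncorrelated) and min_IO_cost = 40 (perfectly
 * correlated), an index with correlation 0.9 yields csquared = 0.81 and
 *		400 + 0.81 * (40 - 400) = 108.4
 * Squaring the correlation thus biases the estimate toward the pessimistic,
 * uncorrelated cost (a linear interpolation on 0.9 would have given 76).
 */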

/*
 * extract_nonindex_conditions
 *
 * Given a list of quals to be enforced in an indexscan, extract the ones that
 * will have to be applied as qpquals (ie, the index machinery won't handle
 * them).  Here we detect only whether a qual clause is directly redundant
 * with some indexclause.  If the index path is chosen for use, createplan.c
 * will try a bit harder to get rid of redundant qual conditions; specifically
 * it will see if quals can be proven to be implied by the indexquals.  But
 * it does not seem worth the cycles to try to factor that in at this stage,
 * since we're only trying to estimate qual eval costs.  Otherwise this must
 * match the logic in create_indexscan_plan().
 *
 * qual_clauses, and the result, are lists of RestrictInfos.
 * indexclauses is a list of IndexClauses.
 */
static List *
extract_nonindex_conditions(List *qual_clauses, List *indexclauses)
{
	List	   *result = NIL;
	ListCell   *lc;

	foreach(lc, qual_clauses)
	{
		RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);

		if (rinfo->pseudoconstant)
			continue;			/* we may drop pseudoconstants here */
		if (is_redundant_with_indexclauses(rinfo, indexclauses))
			continue;			/* dup or derived from same EquivalenceClass */
		/* ... skip the predicate proof attempt createplan.c will try ... */
		result = lappend(result, rinfo);
	}
	return result;
}

/*
 * index_pages_fetched
 *	  Estimate the number of pages actually fetched after accounting for
 *	  cache effects.
 *
 * We use an approximation proposed by Mackert and Lohman, "Index Scans
 * Using a Finite LRU Buffer: A Validated I/O Model", ACM Transactions
 * on Database Systems, Vol. 14, No. 3, September 1989, Pages 401-424.
 * The Mackert and Lohman approximation is that the number of pages
 * fetched is
 *	PF =
 *		min(2TNs/(2T+Ns), T)			when T <= b
 *		2TNs/(2T+Ns)					when T > b and Ns <= 2Tb/(2T-b)
 *		b + (Ns - 2Tb/(2T-b))*(T-b)/T	when T > b and Ns > 2Tb/(2T-b)
 * where
 *		T = # pages in table
 *		N = # tuples in table
 *		s = selectivity = fraction of table to be scanned
 *		b = # buffer pages available (we include kernel space here)
 *
 * We assume that effective_cache_size is the total number of buffer pages
 * available for the whole query, and pro-rate that space across all the
 * tables in the query and the index currently under consideration.  (This
 * ignores space needed for other indexes used by the query, but since we
 * don't know which indexes will get used, we can't estimate that very well;
 * and in any case counting all the tables may well be an overestimate, since
 * depending on the join plan not all the tables may be scanned concurrently.)
 *
 * The product Ns is the number of tuples fetched; we pass in that
 * product rather than calculating it here.  "pages" is the number of pages
 * in the object under consideration (either an index or a table).
 * "index_pages" is the amount to add to the total table space, which was
 * computed for us by make_one_rel.
 *
 * Caller is expected to have ensured that tuples_fetched is greater than zero
 * and rounded to integer (see clamp_row_est).  The result will likewise be
 * greater than zero and integral.
 */
double
index_pages_fetched(double tuples_fetched, BlockNumber pages,
					double index_pages, PlannerInfo *root)
{
	double		pages_fetched;
	double		total_pages;
	double		T,
				b;

	/* T is # pages in table, but don't allow it to be zero */
	T = (pages > 1) ? (double) pages : 1.0;

	/* Compute number of pages assumed to be competing for cache space */
	total_pages = root->total_table_pages + index_pages;
	total_pages = Max(total_pages, 1.0);
	Assert(T <= total_pages);

	/* b is pro-rated share of effective_cache_size */
	b = (double) effective_cache_size * T / total_pages;

	/* force it positive and integral */
	if (b <= 1.0)
		b = 1.0;
	else
		b = ceil(b);

	/* This part is the Mackert and Lohman formula */
	if (T <= b)
	{
		pages_fetched =
			(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
		if (pages_fetched >= T)
			pages_fetched = T;
		else
			pages_fetched = ceil(pages_fetched);
	}
	else
	{
		double		lim;

		lim = (2.0 * T * b) / (2.0 * T - b);
		if (tuples_fetched <= lim)
		{
			pages_fetched =
				(2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
		}
		else
		{
			pages_fetched =
				b + (tuples_fetched - lim) * (T - b) / T;
		}
		pages_fetched = ceil(pages_fetched);
	}
	return pages_fetched;
}
951 : : }
952 : :
953 : : /*
954 : : * get_indexpath_pages
955 : : * Determine the total size of the indexes used in a bitmap index path.
956 : : *
957 : : * Note: if the same index is used more than once in a bitmap tree, we will
958 : : * count it multiple times, which perhaps is the wrong thing ... but it's
959 : : * not completely clear, and detecting duplicates is difficult, so ignore it
960 : : * for now.
961 : : */
962 : : static double
6417 963 : 67455 : get_indexpath_pages(Path *bitmapqual)
964 : : {
965 : 67455 : double result = 0;
966 : : ListCell *l;
967 : :
968 [ + + ]: 67455 : if (IsA(bitmapqual, BitmapAndPath))
969 : : {
970 : 8071 : BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
971 : :
972 [ + - + + : 24213 : foreach(l, apath->bitmapquals)
+ + ]
973 : : {
974 : 16142 : result += get_indexpath_pages((Path *) lfirst(l));
975 : : }
976 : : }
977 [ + + ]: 59384 : else if (IsA(bitmapqual, BitmapOrPath))
978 : : {
979 : 30 : BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
980 : :
981 [ + - + + : 90 : foreach(l, opath->bitmapquals)
+ + ]
982 : : {
983 : 60 : result += get_indexpath_pages((Path *) lfirst(l));
984 : : }
985 : : }
986 [ + - ]: 59354 : else if (IsA(bitmapqual, IndexPath))
987 : : {
988 : 59354 : IndexPath *ipath = (IndexPath *) bitmapqual;
989 : :
990 : 59354 : result = (double) ipath->indexinfo->pages;
991 : : }
992 : : else
6417 tgl@sss.pgh.pa.us 993 [ # # ]:UBC 0 : elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
994 : :
6417 tgl@sss.pgh.pa.us 995 :CBC 67455 : return result;
996 : : }
997 : :
998 : : /*
999 : : * cost_bitmap_heap_scan
1000 : : * Determines and returns the cost of scanning a relation using a bitmap
1001 : : * index-then-heap plan.
1002 : : *
1003 : : * 'baserel' is the relation to be scanned
1004 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1005 : : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
1006 : : * 'loop_count' is the number of repetitions of the indexscan to factor into
1007 : : * estimates of caching behavior
1008 : : *
1009 : : * Note: the component IndexPaths in bitmapqual should have been costed
1010 : : * using the same loop_count.
1011 : : */
1012 : : void
6888 1013 : 214411 : cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
1014 : : ParamPathInfo *param_info,
1015 : : Path *bitmapqual, double loop_count)
1016 : : {
6935 1017 : 214411 : Cost startup_cost = 0;
1018 : 214411 : Cost run_cost = 0;
1019 : : Cost indexTotalCost;
1020 : : QualCost qpqual_cost;
1021 : : Cost cpu_per_tuple;
1022 : : Cost cost_per_page;
1023 : : Cost cpu_run_cost;
1024 : : double tuples_fetched;
1025 : : double pages_fetched;
1026 : : double spc_seq_page_cost,
1027 : : spc_random_page_cost;
1028 : : double T;
1029 : :
1030 : : /* Should only be applied to base relations */
1031 [ - + ]: 214411 : Assert(IsA(baserel, RelOptInfo));
1032 [ - + ]: 214411 : Assert(baserel->relid > 0);
1033 [ - + ]: 214411 : Assert(baserel->rtekind == RTE_RELATION);
1034 : :
1035 : : /* Mark the path with the correct row estimate */
4378 1036 [ + + ]: 214411 : if (param_info)
1037 : 87974 : path->rows = param_info->ppi_rows;
1038 : : else
4461 1039 : 126437 : path->rows = baserel->rows;
1040 : :
6933 1041 [ + + ]: 214411 : if (!enable_bitmapscan)
1042 : 4739 : startup_cost += disable_cost;
1043 : :
2634 rhaas@postgresql.org 1044 : 214411 : pages_fetched = compute_bitmap_pages(root, baserel, bitmapqual,
1045 : : loop_count, &indexTotalCost,
1046 : : &tuples_fetched);
1047 : :
6933 tgl@sss.pgh.pa.us 1048 : 214411 : startup_cost += indexTotalCost;
2634 rhaas@postgresql.org 1049 [ + + ]: 214411 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
1050 : :
1051 : : /* Fetch estimated page costs for tablespace containing table. */
5213 1052 : 214411 : get_tablespace_page_costs(baserel->reltablespace,
1053 : : &spc_random_page_cost,
1054 : : &spc_seq_page_cost);
1055 : :
1056 : : /*
1057 : : * For small numbers of pages we should charge spc_random_page_cost
1058 : : * apiece, while if nearly all the table's pages are being read, it's more
1059 : : * appropriate to charge spc_seq_page_cost apiece. The effect is
1060 : : * nonlinear, too. For lack of a better idea, interpolate like this to
1061 : : * determine the cost per page.
1062 : : */
6932 tgl@sss.pgh.pa.us 1063 [ + + ]: 214411 : if (pages_fetched >= 2.0)
5213 rhaas@postgresql.org 1064 : 41311 : cost_per_page = spc_random_page_cost -
1065 : 41311 : (spc_random_page_cost - spc_seq_page_cost)
1066 : 41311 : * sqrt(pages_fetched / T);
1067 : : else
1068 : 173100 : cost_per_page = spc_random_page_cost;
1069 : :
6933 tgl@sss.pgh.pa.us 1070 : 214411 : run_cost += pages_fetched * cost_per_page;
1071 : :
1072 : : /*
1073 : : * Estimate CPU costs per tuple.
1074 : : *
1075 : : * Often the indexquals don't need to be rechecked at each tuple ... but
1076 : : * not always, especially not if there are enough tuples involved that the
1077 : : * bitmaps become lossy. For the moment, just assume they will be
1078 : : * rechecked always. This means we charge the full freight for all the
1079 : : * scan clauses.
1080 : : */
4378 1081 : 214411 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1082 : :
1083 : 214411 : startup_cost += qpqual_cost.startup;
1084 : 214411 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
2594 rhaas@postgresql.org 1085 : 214411 : cpu_run_cost = cpu_per_tuple * tuples_fetched;
1086 : :
1087 : : /* Adjust costing for parallelism, if used. */
1088 [ + + ]: 214411 : if (path->parallel_workers > 0)
1089 : : {
1090 : 2087 : double parallel_divisor = get_parallel_divisor(path);
1091 : :
1092 : : /* The CPU cost is divided among all the workers. */
1093 : 2087 : cpu_run_cost /= parallel_divisor;
1094 : :
1095 : 2087 : path->rows = clamp_row_est(path->rows / parallel_divisor);
1096 : : }
1097 : :
1098 : :
1099 : 214411 : run_cost += cpu_run_cost;
1100 : :
1101 : : /* tlist eval costs are paid per output row, not per tuple scanned */
2978 tgl@sss.pgh.pa.us 1102 : 214411 : startup_cost += path->pathtarget->cost.startup;
1103 : 214411 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1104 : :
6935 1105 : 214411 : path->startup_cost = startup_cost;
1106 : 214411 : path->total_cost = startup_cost + run_cost;
1107 : 214411 : }
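
/*
 * Worked example of the per-page interpolation above, with made-up numbers
 * and the default page costs: fetching 25 pages of a 100-page table with
 * spc_random_page_cost = 4.0 and spc_seq_page_cost = 1.0 charges
 *		4.0 - (4.0 - 1.0) * sqrt(25 / 100) = 2.5
 * per page, exactly halfway between the random and sequential costs.
 */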

/*
 * cost_bitmap_tree_node
 *		Extract cost and selectivity from a bitmap tree node (index/and/or)
 */
void
cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
{
	if (IsA(path, IndexPath))
	{
		*cost = ((IndexPath *) path)->indextotalcost;
		*selec = ((IndexPath *) path)->indexselectivity;

		/*
		 * Charge a small amount per retrieved tuple to reflect the costs of
		 * manipulating the bitmap.  This is mostly to make sure that a bitmap
		 * scan doesn't look to be the same cost as an indexscan to retrieve a
		 * single tuple.
		 */
		*cost += 0.1 * cpu_operator_cost * path->rows;
	}
	else if (IsA(path, BitmapAndPath))
	{
		*cost = path->total_cost;
		*selec = ((BitmapAndPath *) path)->bitmapselectivity;
	}
	else if (IsA(path, BitmapOrPath))
	{
		*cost = path->total_cost;
		*selec = ((BitmapOrPath *) path)->bitmapselectivity;
	}
	else
	{
		elog(ERROR, "unrecognized node type: %d", nodeTag(path));
		*cost = *selec = 0;		/* keep compiler quiet */
	}
}

/*
 * cost_bitmap_and_node
 *		Estimate the cost of a BitmapAnd node
 *
 * Note that this considers only the costs of index scanning and bitmap
 * creation, not the eventual heap access.  In that sense the object isn't
 * truly a Path, but it has enough path-like properties (costs in particular)
 * to warrant treating it as one.  We don't bother to set the path rows field,
 * however.
 */
void
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
	Cost		totalCost;
	Selectivity selec;
	ListCell   *l;

	/*
	 * We estimate AND selectivity on the assumption that the inputs are
	 * independent.  This is probably often wrong, but we don't have the info
	 * to do better.
	 *
	 * The runtime cost of the BitmapAnd itself is estimated at 100x
	 * cpu_operator_cost for each tbm_intersect needed.  Probably too small,
	 * definitely too simplistic?
	 */
	totalCost = 0.0;
	selec = 1.0;
	foreach(l, path->bitmapquals)
	{
		Path	   *subpath = (Path *) lfirst(l);
		Cost		subCost;
		Selectivity subselec;

		cost_bitmap_tree_node(subpath, &subCost, &subselec);

		selec *= subselec;

		totalCost += subCost;
		if (l != list_head(path->bitmapquals))
			totalCost += 100.0 * cpu_operator_cost;
	}
	path->bitmapselectivity = selec;
	path->path.rows = 0;		/* per above, not used */
	path->path.startup_cost = totalCost;
	path->path.total_cost = totalCost;
}
1193 : :
1194 : : /*
1195 : : * cost_bitmap_or_node
1196 : : * Estimate the cost of a BitmapOr node
1197 : : *
1198 : : * See comments for cost_bitmap_and_node.
1199 : : */
1200 : : void
6888 1201 : 480 : cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
1202 : : {
1203 : : Cost totalCost;
1204 : : Selectivity selec;
1205 : : ListCell *l;
1206 : :
1207 : : /*
1208 : : * We estimate OR selectivity on the assumption that the inputs are
1209 : : * non-overlapping, since that's often the case in "x IN (list)" type
1210 : : * situations. Of course, we clamp to 1.0 at the end.
1211 : : *
1212 : : * The runtime cost of the BitmapOr itself is estimated at 100x
1213 : : * cpu_operator_cost for each tbm_union needed. Probably too small,
1214 : : * definitely too simplistic? We are aware that the tbm_unions are
1215 : : * optimized out when the inputs are BitmapIndexScans.
1216 : : */
6933 1217 : 480 : totalCost = 0.0;
1218 : 480 : selec = 0.0;
1219 [ + - + + : 1470 : foreach(l, path->bitmapquals)
+ + ]
1220 : : {
6756 bruce@momjian.us 1221 : 990 : Path *subpath = (Path *) lfirst(l);
1222 : : Cost subCost;
1223 : : Selectivity subselec;
1224 : :
6933 tgl@sss.pgh.pa.us 1225 : 990 : cost_bitmap_tree_node(subpath, &subCost, &subselec);
1226 : :
1227 : 990 : selec += subselec;
1228 : :
1229 : 990 : totalCost += subCost;
1230 [ + + ]: 990 : if (l != list_head(path->bitmapquals) &&
1231 [ + + ]: 510 : !IsA(subpath, IndexPath))
1232 : 15 : totalCost += 100.0 * cpu_operator_cost;
1233 : : }
1234 [ + - ]: 480 : path->bitmapselectivity = Min(selec, 1.0);
4461 1235 : 480 : path->path.rows = 0; /* per above, not used */
6933 1236 : 480 : path->path.startup_cost = totalCost;
1237 : 480 : path->path.total_cost = totalCost;
1238 : 480 : }
1239 : :
1240 : : /*
1241 : : * cost_tidscan
1242 : : * Determines and returns the cost of scanning a relation using TIDs.
1243 : : *
1244 : : * 'baserel' is the relation to be scanned
1245 : : * 'tidquals' is the list of TID-checkable quals
1246 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1247 : : */
1248 : : void
6888 1249 : 378 : cost_tidscan(Path *path, PlannerInfo *root,
1250 : : RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
1251 : : {
8825 1252 : 378 : Cost startup_cost = 0;
1253 : 378 : Cost run_cost = 0;
6017 1254 : 378 : bool isCurrentOf = false;
1255 : : QualCost qpqual_cost;
1256 : : Cost cpu_per_tuple;
1257 : : QualCost tid_qual_cost;
1258 : : double ntuples;
1259 : : ListCell *l;
1260 : : double spc_random_page_cost;
1261 : :
1262 : : /* Should only be applied to base relations */
7736 1263 [ - + ]: 378 : Assert(baserel->relid > 0);
8008 1264 [ - + ]: 378 : Assert(baserel->rtekind == RTE_RELATION);
1265 : :
1266 : : /* Mark the path with the correct row estimate */
4249 1267 [ + + ]: 378 : if (param_info)
1268 : 72 : path->rows = param_info->ppi_rows;
1269 : : else
1270 : 306 : path->rows = baserel->rows;
1271 : :
1272 : : /* Count how many tuples we expect to retrieve */
6714 1273 : 378 : ntuples = 0;
1274 [ + - + + : 768 : foreach(l, tidquals)
+ + ]
1275 : : {
1932 1276 : 390 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
1277 : 390 : Expr *qual = rinfo->clause;
1278 : :
1279 [ + + ]: 390 : if (IsA(qual, ScalarArrayOpExpr))
1280 : : {
1281 : : /* Each element of the array yields 1 tuple */
1282 : 15 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) qual;
6402 bruce@momjian.us 1283 : 15 : Node *arraynode = (Node *) lsecond(saop->args);
1284 : :
101 tgl@sss.pgh.pa.us 1285 :GNC 15 : ntuples += estimate_array_length(root, arraynode);
1286 : : }
1932 tgl@sss.pgh.pa.us 1287 [ + + ]:CBC 375 : else if (IsA(qual, CurrentOfExpr))
1288 : : {
1289 : : /* CURRENT OF yields 1 tuple */
6017 1290 : 196 : isCurrentOf = true;
1291 : 196 : ntuples++;
1292 : : }
1293 : : else
1294 : : {
1295 : : /* It's just CTID = something, count 1 tuple */
6714 1296 : 179 : ntuples++;
1297 : : }
1298 : : }
1299 : :
1300 : : /*
1301 : : * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c
1302 : : * understands how to do it correctly. Therefore, honor enable_tidscan
1303 : : * only when CURRENT OF isn't present. Also note that cost_qual_eval
1304 : : * counts a CurrentOfExpr as having startup cost disable_cost, which we
1305 : : * subtract off here; that's to prevent other plan types such as seqscan
1306 : : * from winning.
1307 : : */
6017 1308 [ + + ]: 378 : if (isCurrentOf)
1309 : : {
1310 [ - + ]: 196 : Assert(baserel->baserestrictcost.startup >= disable_cost);
1311 : 196 : startup_cost -= disable_cost;
1312 : : }
1313 [ - + ]: 182 : else if (!enable_tidscan)
6017 tgl@sss.pgh.pa.us 1314 :UBC 0 : startup_cost += disable_cost;
1315 : :
1316 : : /*
1317 : : * The TID qual expressions will be computed once, any other baserestrict
1318 : : * quals once per retrieved tuple.
1319 : : */
6152 tgl@sss.pgh.pa.us 1320 :CBC 378 : cost_qual_eval(&tid_qual_cost, tidquals, root);
1321 : :
1322 : : /* fetch estimated page cost for tablespace containing table */
5213 rhaas@postgresql.org 1323 : 378 : get_tablespace_page_costs(baserel->reltablespace,
1324 : : &spc_random_page_cost,
1325 : : NULL);
1326 : :
1327 : : /* disk costs --- assume each tuple on a different page */
1328 : 378 : run_cost += spc_random_page_cost * ntuples;
1329 : :
1330 : : /* Add scanning CPU costs */
4249 tgl@sss.pgh.pa.us 1331 : 378 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1332 : :
1333 : : /* XXX currently we assume TID quals are a subset of qpquals */
1334 : 378 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1335 : 378 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
6152 1336 : 378 : tid_qual_cost.per_tuple;
8825 1337 : 378 : run_cost += cpu_per_tuple * ntuples;
1338 : :
1339 : : /* tlist eval costs are paid per output row, not per tuple scanned */
2978 1340 : 378 : startup_cost += path->pathtarget->cost.startup;
1341 : 378 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1342 : :
8825 1343 : 378 : path->startup_cost = startup_cost;
1344 : 378 : path->total_cost = startup_cost + run_cost;
8909 bruce@momjian.us 1345 : 378 : }
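 : :
 : : /*
 : :  * Worked example (an illustrative sketch, assuming the stock defaults
 : :  * random_page_cost = 4.0 and cpu_tuple_cost = 0.01): a qual such as
 : :  * "ctid = ANY ('{...}')" with a 3-element array gives ntuples = 3, and
 : :  * with no other restriction quals the run cost comes out to about
 : :  *		4.0 * 3 + 0.01 * 3 = 12.03
 : :  * since each tuple is assumed to land on a different page.
 : :  */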
1346 : :
1347 : : /*
1348 : : * cost_tidrangescan
1349 : : * Determines and sets the costs of scanning a relation using a range of
1350 : : * TIDs for 'path'
1351 : : *
1352 : : * 'baserel' is the relation to be scanned
1353 : : * 'tidrangequals' is the list of TID-checkable range quals
1354 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1355 : : */
1356 : : void
1142 drowley@postgresql.o 1357 : 101 : cost_tidrangescan(Path *path, PlannerInfo *root,
1358 : : RelOptInfo *baserel, List *tidrangequals,
1359 : : ParamPathInfo *param_info)
1360 : : {
1361 : : Selectivity selectivity;
1362 : : double pages;
1363 : 101 : Cost startup_cost = 0;
1364 : 101 : Cost run_cost = 0;
1365 : : QualCost qpqual_cost;
1366 : : Cost cpu_per_tuple;
1367 : : QualCost tid_qual_cost;
1368 : : double ntuples;
1369 : : double nseqpages;
1370 : : double spc_random_page_cost;
1371 : : double spc_seq_page_cost;
1372 : :
1373 : : /* Should only be applied to base relations */
1374 [ - + ]: 101 : Assert(baserel->relid > 0);
1375 [ - + ]: 101 : Assert(baserel->rtekind == RTE_RELATION);
1376 : :
1377 : : /* Mark the path with the correct row estimate */
1378 [ - + ]: 101 : if (param_info)
1142 drowley@postgresql.o 1379 :UBC 0 : path->rows = param_info->ppi_rows;
1380 : : else
1142 drowley@postgresql.o 1381 :CBC 101 : path->rows = baserel->rows;
1382 : :
1383 : : /* Count how many tuples and pages we expect to scan */
1384 : 101 : selectivity = clauselist_selectivity(root, tidrangequals, baserel->relid,
1385 : : JOIN_INNER, NULL);
1386 : 101 : pages = ceil(selectivity * baserel->pages);
1387 : :
1388 [ + + ]: 101 : if (pages <= 0.0)
1389 : 21 : pages = 1.0;
1390 : :
1391 : : /*
1392 : : * The first page in a range requires a random seek, but each subsequent
1393 : : * page is just a normal sequential page read. NOTE: it's desirable for
1394 : : * TID Range Scans to cost more than the equivalent Sequential Scans,
1395 : : * because Seq Scans have some performance advantages such as scan
1396 : : * synchronization and parallelizability, and we'd prefer one of them to
1397 : : * be picked unless a TID Range Scan really is better.
1398 : : */
1399 : 101 : ntuples = selectivity * baserel->tuples;
1400 : 101 : nseqpages = pages - 1.0;
1401 : :
1402 [ - + ]: 101 : if (!enable_tidscan)
1142 drowley@postgresql.o 1403 :UBC 0 : startup_cost += disable_cost;
1404 : :
1405 : : /*
1406 : : * The TID qual expressions will be computed once, any other baserestrict
1407 : : * quals once per retrieved tuple.
1408 : : */
1142 drowley@postgresql.o 1409 :CBC 101 : cost_qual_eval(&tid_qual_cost, tidrangequals, root);
1410 : :
1411 : : /* fetch estimated page cost for tablespace containing table */
1412 : 101 : get_tablespace_page_costs(baserel->reltablespace,
1413 : : &spc_random_page_cost,
1414 : : &spc_seq_page_cost);
1415 : :
1416 : : /* disk costs; 1 random page and the remainder as seq pages */
1417 : 101 : run_cost += spc_random_page_cost + spc_seq_page_cost * nseqpages;
1418 : :
1419 : : /* Add scanning CPU costs */
1420 : 101 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1421 : :
1422 : : /*
1423 : : * XXX currently we assume TID quals are a subset of qpquals at this
1424 : : * point; they will be removed (if possible) when we create the plan, so
1425 : : * we subtract their cost from the total qpqual cost. (If the TID quals
1426 : : * can't be removed, this is a mistake and we're going to underestimate
1427 : : * the CPU cost a bit.)
1428 : : */
1429 : 101 : startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
1430 : 101 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
1431 : 101 : tid_qual_cost.per_tuple;
1432 : 101 : run_cost += cpu_per_tuple * ntuples;
1433 : :
1434 : : /* tlist eval costs are paid per output row, not per tuple scanned */
1435 : 101 : startup_cost += path->pathtarget->cost.startup;
1436 : 101 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1437 : :
1438 : 101 : path->startup_cost = startup_cost;
1439 : 101 : path->total_cost = startup_cost + run_cost;
1440 : 101 : }
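 : :
 : : /*
 : :  * Worked example (an illustrative sketch, assuming the stock defaults
 : :  * random_page_cost = 4.0 and seq_page_cost = 1.0): if the range quals
 : :  * are expected to select 10 pages, only the first is charged at the
 : :  * random rate, giving a disk cost of
 : :  *		4.0 + 1.0 * 9 = 13.0
 : :  * rather than the 4.0 * 10 = 40.0 we would charge if every page were
 : :  * treated as a random fetch.
 : :  */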
1441 : :
1442 : : /*
1443 : : * cost_subqueryscan
1444 : : * Determines and returns the cost of scanning a subquery RTE.
1445 : : *
1446 : : * 'baserel' is the relation to be scanned
1447 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1448 : : * 'trivial_pathtarget' is true if the pathtarget is believed to be trivial.
1449 : : */
1450 : : void
2960 tgl@sss.pgh.pa.us 1451 : 19387 : cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
1452 : : RelOptInfo *baserel, ParamPathInfo *param_info,
1453 : : bool trivial_pathtarget)
1454 : : {
1455 : : Cost startup_cost;
1456 : : Cost run_cost;
1457 : : List *qpquals;
1458 : : QualCost qpqual_cost;
1459 : : Cost cpu_per_tuple;
1460 : :
1461 : : /* Should only be applied to base relations that are subqueries */
7580 1462 [ - + ]: 19387 : Assert(baserel->relid > 0);
1463 [ - + ]: 19387 : Assert(baserel->rtekind == RTE_SUBQUERY);
1464 : :
1465 : : /*
1466 : : * We compute the rowcount estimate as the subplan's estimate times the
1467 : : * selectivity of relevant restriction clauses. In simple cases this will
1468 : : * come out the same as baserel->rows; but when dealing with parallelized
1469 : : * paths we must do it like this to get the right answer.
1470 : : */
4378 1471 [ + + ]: 19387 : if (param_info)
711 1472 : 243 : qpquals = list_concat_copy(param_info->ppi_clauses,
1473 : 243 : baserel->baserestrictinfo);
1474 : : else
1475 : 19144 : qpquals = baserel->baserestrictinfo;
1476 : :
1477 : 19387 : path->path.rows = clamp_row_est(path->subpath->rows *
1478 : 19387 : clauselist_selectivity(root,
1479 : : qpquals,
1480 : : 0,
1481 : : JOIN_INNER,
1482 : : NULL));
1483 : :
1484 : : /*
1485 : : * Cost of path is cost of evaluating the subplan, plus cost of evaluating
1486 : : * any restriction clauses and tlist that will be attached to the
1487 : : * SubqueryScan node, plus cpu_tuple_cost to account for selection and
1488 : : * projection overhead.
1489 : : */
2960 1490 : 19387 : path->path.startup_cost = path->subpath->startup_cost;
1491 : 19387 : path->path.total_cost = path->subpath->total_cost;
1492 : :
1493 : : /*
1494 : : * However, if there are no relevant restriction clauses and the
1495 : : * pathtarget is trivial, then we expect that setrefs.c will optimize away
1496 : : * the SubqueryScan plan node altogether, so we should just make its cost
1497 : : * and rowcount equal to the input path's.
1498 : : *
1499 : : * Note: there are some edge cases where createplan.c will apply a
1500 : : * different targetlist to the SubqueryScan node, thus falsifying our
1501 : : * current estimate of whether the target is trivial, and making the cost
1502 : : * estimate (though not the rowcount) wrong. It does not seem worth the
1503 : : * extra complication to try to account for that exactly, especially since
1504 : : * that behavior falsifies other cost estimates as well.
1505 : : */
635 1506 [ + + + + ]: 19387 : if (qpquals == NIL && trivial_pathtarget)
1507 : 8325 : return;
1508 : :
4378 1509 : 11062 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1510 : :
1511 : 11062 : startup_cost = qpqual_cost.startup;
1512 : 11062 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
711 1513 : 11062 : run_cost = cpu_per_tuple * path->subpath->rows;
1514 : :
1515 : : /* tlist eval costs are paid per output row, not per tuple scanned */
2960 1516 : 11062 : startup_cost += path->path.pathtarget->cost.startup;
1517 : 11062 : run_cost += path->path.pathtarget->cost.per_tuple * path->path.rows;
1518 : :
1519 : 11062 : path->path.startup_cost += startup_cost;
1520 : 11062 : path->path.total_cost += startup_cost + run_cost;
1521 : : }
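 : :
 : : /*
 : :  * Worked example (an illustrative sketch): if the subpath is estimated
 : :  * at 1000 rows and the attached quals have selectivity 0.25, the path
 : :  * is marked with 250 rows; with no quals and a trivial pathtarget the
 : :  * node is expected to be optimized away, so the subpath's costs are
 : :  * used unchanged.
 : :  */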
1522 : :
1523 : : /*
1524 : : * cost_functionscan
1525 : : * Determines and returns the cost of scanning a function RTE.
1526 : : *
1527 : : * 'baserel' is the relation to be scanned
1528 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1529 : : */
1530 : : void
4268 1531 : 21525 : cost_functionscan(Path *path, PlannerInfo *root,
1532 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1533 : : {
8008 1534 : 21525 : Cost startup_cost = 0;
1535 : 21525 : Cost run_cost = 0;
1536 : : QualCost qpqual_cost;
1537 : : Cost cpu_per_tuple;
1538 : : RangeTblEntry *rte;
1539 : : QualCost exprcost;
1540 : :
1541 : : /* Should only be applied to base relations that are functions */
7736 1542 [ - + ]: 21525 : Assert(baserel->relid > 0);
6203 1543 [ + - ]: 21525 : rte = planner_rt_fetch(baserel->relid, root);
6292 1544 [ - + ]: 21525 : Assert(rte->rtekind == RTE_FUNCTION);
1545 : :
1546 : : /* Mark the path with the correct row estimate */
4268 1547 [ + + ]: 21525 : if (param_info)
1548 : 3871 : path->rows = param_info->ppi_rows;
1549 : : else
1550 : 17654 : path->rows = baserel->rows;
1551 : :
1552 : : /*
1553 : : * Estimate costs of executing the function expression(s).
1554 : : *
1555 : : * Currently, nodeFunctionscan.c always executes the functions to
1556 : : * completion before returning any rows, and caches the results in a
1557 : : * tuplestore. So the function eval cost is all startup cost, and per-row
1558 : : * costs are minimal.
1559 : : *
1560 : : * XXX in principle we ought to charge tuplestore spill costs if the
1561 : : * number of rows is large. However, given how phony our rowcount
1562 : : * estimates for functions tend to be, there's not a lot of point in that
1563 : : * refinement right now.
1564 : : */
3797 1565 : 21525 : cost_qual_eval_node(&exprcost, (Node *) rte->functions, root);
1566 : :
5328 1567 : 21525 : startup_cost += exprcost.startup + exprcost.per_tuple;
1568 : :
1569 : : /* Add scanning CPU costs */
4268 1570 : 21525 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1571 : :
1572 : 21525 : startup_cost += qpqual_cost.startup;
1573 : 21525 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
8008 1574 : 21525 : run_cost += cpu_per_tuple * baserel->tuples;
1575 : :
1576 : : /* tlist eval costs are paid per output row, not per tuple scanned */
2978 1577 : 21525 : startup_cost += path->pathtarget->cost.startup;
1578 : 21525 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1579 : :
8008 1580 : 21525 : path->startup_cost = startup_cost;
1581 : 21525 : path->total_cost = startup_cost + run_cost;
1582 : 21525 : }
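 : :
 : : /*
 : :  * Worked example (an illustrative sketch): for a function RTE such as
 : :  * generate_series(1, 1000), estimated at 1000 rows, the function eval
 : :  * cost lands entirely in startup_cost (the tuplestore is filled before
 : :  * the first row is returned), while the run cost is roughly
 : :  * cpu_tuple_cost * 1000 = 10.0 at the stock default of 0.01.
 : :  */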
1583 : :
1584 : : /*
1585 : : * cost_tablefuncscan
1586 : : * Determines and returns the cost of scanning a table function.
1587 : : *
1588 : : * 'baserel' is the relation to be scanned
1589 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1590 : : */
1591 : : void
2594 alvherre@alvh.no-ip. 1592 : 254 : cost_tablefuncscan(Path *path, PlannerInfo *root,
1593 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1594 : : {
1595 : 254 : Cost startup_cost = 0;
1596 : 254 : Cost run_cost = 0;
1597 : : QualCost qpqual_cost;
1598 : : Cost cpu_per_tuple;
1599 : : RangeTblEntry *rte;
1600 : : QualCost exprcost;
1601 : :
1602 : : /* Should only be applied to base relations that are functions */
1603 [ - + ]: 254 : Assert(baserel->relid > 0);
1604 [ + - ]: 254 : rte = planner_rt_fetch(baserel->relid, root);
1605 [ - + ]: 254 : Assert(rte->rtekind == RTE_TABLEFUNC);
1606 : :
1607 : : /* Mark the path with the correct row estimate */
1608 [ + + ]: 254 : if (param_info)
1609 : 117 : path->rows = param_info->ppi_rows;
1610 : : else
1611 : 137 : path->rows = baserel->rows;
1612 : :
1613 : : /*
1614 : : * Estimate costs of executing the table func expression(s).
1615 : : *
1616 : : * XXX in principle we ought to charge tuplestore spill costs if the
1617 : : * number of rows is large. However, given how phony our rowcount
1618 : : * estimates for tablefuncs tend to be, there's not a lot of point in that
1619 : : * refinement right now.
1620 : : */
1621 : 254 : cost_qual_eval_node(&exprcost, (Node *) rte->tablefunc, root);
1622 : :
1623 : 254 : startup_cost += exprcost.startup + exprcost.per_tuple;
1624 : :
1625 : : /* Add scanning CPU costs */
1626 : 254 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1627 : :
1628 : 254 : startup_cost += qpqual_cost.startup;
1629 : 254 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1630 : 254 : run_cost += cpu_per_tuple * baserel->tuples;
1631 : :
1632 : : /* tlist eval costs are paid per output row, not per tuple scanned */
1633 : 254 : startup_cost += path->pathtarget->cost.startup;
1634 : 254 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1635 : :
1636 : 254 : path->startup_cost = startup_cost;
1637 : 254 : path->total_cost = startup_cost + run_cost;
1638 : 254 : }
1639 : :
1640 : : /*
1641 : : * cost_valuesscan
1642 : : * Determines and returns the cost of scanning a VALUES RTE.
1643 : : *
1644 : : * 'baserel' is the relation to be scanned
1645 : : * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
1646 : : */
1647 : : void
4263 tgl@sss.pgh.pa.us 1648 : 3858 : cost_valuesscan(Path *path, PlannerInfo *root,
1649 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1650 : : {
6465 mail@joeconway.com 1651 : 3858 : Cost startup_cost = 0;
1652 : 3858 : Cost run_cost = 0;
1653 : : QualCost qpqual_cost;
1654 : : Cost cpu_per_tuple;
1655 : :
1656 : : /* Should only be applied to base relations that are values lists */
1657 [ - + ]: 3858 : Assert(baserel->relid > 0);
1658 [ - + ]: 3858 : Assert(baserel->rtekind == RTE_VALUES);
1659 : :
1660 : : /* Mark the path with the correct row estimate */
4263 tgl@sss.pgh.pa.us 1661 [ + + ]: 3858 : if (param_info)
1662 : 24 : path->rows = param_info->ppi_rows;
1663 : : else
1664 : 3834 : path->rows = baserel->rows;
1665 : :
1666 : : /*
1667 : : * For now, estimate list evaluation cost at one operator eval per list
1668 : : * (probably pretty bogus, but is it worth being smarter?)
1669 : : */
6465 mail@joeconway.com 1670 : 3858 : cpu_per_tuple = cpu_operator_cost;
1671 : :
1672 : : /* Add scanning CPU costs */
4263 tgl@sss.pgh.pa.us 1673 : 3858 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1674 : :
1675 : 3858 : startup_cost += qpqual_cost.startup;
1676 : 3858 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
6465 mail@joeconway.com 1677 : 3858 : run_cost += cpu_per_tuple * baserel->tuples;
1678 : :
1679 : : /* tlist eval costs are paid per output row, not per tuple scanned */
2978 tgl@sss.pgh.pa.us 1680 : 3858 : startup_cost += path->pathtarget->cost.startup;
1681 : 3858 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1682 : :
6465 mail@joeconway.com 1683 : 3858 : path->startup_cost = startup_cost;
1684 : 3858 : path->total_cost = startup_cost + run_cost;
1685 : 3858 : }
1686 : :
1687 : : /*
1688 : : * cost_ctescan
1689 : : * Determines and returns the cost of scanning a CTE RTE.
1690 : : *
1691 : : * Note: this is used for both self-reference and regular CTEs; the
1692 : : * possible cost differences are below the threshold of what we could
1693 : : * estimate accurately anyway. Note that the costs of evaluating the
1694 : : * referenced CTE query are added into the final plan as initplan costs,
1695 : : * and should NOT be counted here.
1696 : : */
1697 : : void
4249 tgl@sss.pgh.pa.us 1698 : 2009 : cost_ctescan(Path *path, PlannerInfo *root,
1699 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1700 : : {
5671 1701 : 2009 : Cost startup_cost = 0;
1702 : 2009 : Cost run_cost = 0;
1703 : : QualCost qpqual_cost;
1704 : : Cost cpu_per_tuple;
1705 : :
1706 : : /* Should only be applied to base relations that are CTEs */
1707 [ - + ]: 2009 : Assert(baserel->relid > 0);
1708 [ - + ]: 2009 : Assert(baserel->rtekind == RTE_CTE);
1709 : :
1710 : : /* Mark the path with the correct row estimate */
4249 1711 [ - + ]: 2009 : if (param_info)
4249 tgl@sss.pgh.pa.us 1712 :UBC 0 : path->rows = param_info->ppi_rows;
1713 : : else
4249 tgl@sss.pgh.pa.us 1714 :CBC 2009 : path->rows = baserel->rows;
1715 : :
1716 : : /* Charge one CPU tuple cost per row for tuplestore manipulation */
5671 1717 : 2009 : cpu_per_tuple = cpu_tuple_cost;
1718 : :
1719 : : /* Add scanning CPU costs */
4249 1720 : 2009 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1721 : :
1722 : 2009 : startup_cost += qpqual_cost.startup;
1723 : 2009 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
5671 1724 : 2009 : run_cost += cpu_per_tuple * baserel->tuples;
1725 : :
1726 : : /* tlist eval costs are paid per output row, not per tuple scanned */
2978 1727 : 2009 : startup_cost += path->pathtarget->cost.startup;
1728 : 2009 : run_cost += path->pathtarget->cost.per_tuple * path->rows;
1729 : :
5671 1730 : 2009 : path->startup_cost = startup_cost;
1731 : 2009 : path->total_cost = startup_cost + run_cost;
1732 : 2009 : }
1733 : :
1734 : : /*
1735 : : * cost_namedtuplestorescan
1736 : : * Determines and returns the cost of scanning a named tuplestore.
1737 : : */
1738 : : void
2571 kgrittn@postgresql.o 1739 : 223 : cost_namedtuplestorescan(Path *path, PlannerInfo *root,
1740 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1741 : : {
1742 : 223 : Cost startup_cost = 0;
1743 : 223 : Cost run_cost = 0;
1744 : : QualCost qpqual_cost;
1745 : : Cost cpu_per_tuple;
1746 : :
1747 : : /* Should only be applied to base relations that are Tuplestores */
1748 [ - + ]: 223 : Assert(baserel->relid > 0);
1749 [ - + ]: 223 : Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
1750 : :
1751 : : /* Mark the path with the correct row estimate */
1752 [ - + ]: 223 : if (param_info)
2571 kgrittn@postgresql.o 1753 :UBC 0 : path->rows = param_info->ppi_rows;
1754 : : else
2571 kgrittn@postgresql.o 1755 :CBC 223 : path->rows = baserel->rows;
1756 : :
1757 : : /* Charge one CPU tuple cost per row for tuplestore manipulation */
1758 : 223 : cpu_per_tuple = cpu_tuple_cost;
1759 : :
1760 : : /* Add scanning CPU costs */
1761 : 223 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1762 : :
1763 : 223 : startup_cost += qpqual_cost.startup;
1764 : 223 : cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
1765 : 223 : run_cost += cpu_per_tuple * baserel->tuples;
1766 : :
1767 : 223 : path->startup_cost = startup_cost;
1768 : 223 : path->total_cost = startup_cost + run_cost;
1769 : 223 : }
1770 : :
1771 : : /*
1772 : : * cost_resultscan
1773 : : * Determines and returns the cost of scanning an RTE_RESULT relation.
1774 : : */
1775 : : void
1903 tgl@sss.pgh.pa.us 1776 : 805 : cost_resultscan(Path *path, PlannerInfo *root,
1777 : : RelOptInfo *baserel, ParamPathInfo *param_info)
1778 : : {
1779 : 805 : Cost startup_cost = 0;
1780 : 805 : Cost run_cost = 0;
1781 : : QualCost qpqual_cost;
1782 : : Cost cpu_per_tuple;
1783 : :
1784 : : /* Should only be applied to RTE_RESULT base relations */
1785 [ - + ]: 805 : Assert(baserel->relid > 0);
1786 [ - + ]: 805 : Assert(baserel->rtekind == RTE_RESULT);
1787 : :
1788 : : /* Mark the path with the correct row estimate */
1789 [ + + ]: 805 : if (param_info)
1790 : 72 : path->rows = param_info->ppi_rows;
1791 : : else
1792 : 733 : path->rows = baserel->rows;
1793 : :
1794 : : /* We charge qual cost plus cpu_tuple_cost */
1795 : 805 : get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
1796 : :
1797 : 805 : startup_cost += qpqual_cost.startup;
1798 : 805 : cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
1799 : 805 : run_cost += cpu_per_tuple * baserel->tuples;
1800 : :
1801 : 805 : path->startup_cost = startup_cost;
1802 : 805 : path->total_cost = startup_cost + run_cost;
1803 : 805 : }
1804 : :
1805 : : /*
1806 : : * cost_recursive_union
1807 : : * Determines and returns the cost of performing a recursive union,
1808 : : * and also the estimated output size.
1809 : : *
1810 : : * We are given Paths for the nonrecursive and recursive terms.
1811 : : */
1812 : : void
2960 1813 : 403 : cost_recursive_union(Path *runion, Path *nrterm, Path *rterm)
1814 : : {
1815 : : Cost startup_cost;
1816 : : Cost total_cost;
1817 : : double total_rows;
1818 : :
1819 : : /* We probably have decent estimates for the non-recursive term */
5671 1820 : 403 : startup_cost = nrterm->startup_cost;
1821 : 403 : total_cost = nrterm->total_cost;
2960 1822 : 403 : total_rows = nrterm->rows;
1823 : :
1824 : : /*
1825 : : * We arbitrarily assume that about 10 recursive iterations will be
1826 : : * needed, and that we've managed to get a good fix on the cost and output
1827 : : * size of each one of them. These are mighty shaky assumptions but it's
1828 : : * hard to see how to do better.
1829 : : */
5671 1830 : 403 : total_cost += 10 * rterm->total_cost;
2960 1831 : 403 : total_rows += 10 * rterm->rows;
1832 : :
1833 : : /*
1834 : : * Also charge cpu_tuple_cost per row to account for the costs of
1835 : : * manipulating the tuplestores. (We don't worry about possible
1836 : : * spill-to-disk costs.)
1837 : : */
5671 1838 : 403 : total_cost += cpu_tuple_cost * total_rows;
1839 : :
1840 : 403 : runion->startup_cost = startup_cost;
1841 : 403 : runion->total_cost = total_cost;
2960 1842 : 403 : runion->rows = total_rows;
1843 : 403 : runion->pathtarget->width = Max(nrterm->pathtarget->width,
1844 : : rterm->pathtarget->width);
5671 1845 : 403 : }
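 : :
 : : /*
 : :  * Worked example (an illustrative sketch): a 1-row nonrecursive term
 : :  * and a recursive term estimated at 100 rows give
 : :  *		total_rows = 1 + 10 * 100 = 1001
 : :  * under the assumed 10 iterations, plus a tuplestore charge of
 : :  * cpu_tuple_cost * 1001 = 10.01 at the stock default of 0.01.
 : :  */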
1846 : :
1847 : : /*
1848 : : * cost_tuplesort
1849 : : * Determines and returns the cost of sorting a relation using tuplesort,
1850 : : * not including the cost of reading the input data.
1851 : : *
1852 : : * If the total volume of data to sort is less than sort_mem, we will do
1853 : : * an in-memory sort, which requires no I/O and about t*log2(t) tuple
1854 : : * comparisons for t tuples.
1855 : : *
1856 : : * If the total volume exceeds sort_mem, we switch to a tape-style merge
1857 : : * algorithm. There will still be about t*log2(t) tuple comparisons in
1858 : : * total, but we will also need to write and read each tuple once per
1859 : : * merge pass. We expect about ceil(logM(r)) merge passes where r is the
1860 : : * number of initial runs formed and M is the merge order used by tuplesort.c.
1861 : : * Since the average initial run should be about sort_mem, we have
1862 : : * disk traffic = 2 * relsize * ceil(logM(relsize / sort_mem))
1863 : : * cpu = comparison_cost * t * log2(t)
1864 : : *
1865 : : * If the sort is bounded (i.e., only the first k result tuples are needed)
1866 : : * and k tuples can fit into sort_mem, we use a heap method that keeps only
1867 : : * k tuples in the heap; this will require about t*log2(k) tuple comparisons.
1868 : : *
1869 : : * The disk traffic is assumed to be 3/4ths sequential and 1/4th random
1870 : : * accesses (XXX can't we refine that guess?)
1871 : : *
1872 : : * By default, we charge two operator evals per tuple comparison, which should
1873 : : * be in the right ballpark in most cases. The caller can tweak this by
1874 : : * specifying nonzero comparison_cost; typically that's used for any extra
1875 : : * work that has to be done to prepare the inputs to the comparison operators.
1876 : : *
1877 : : * 'tuples' is the number of tuples in the relation
1878 : : * 'width' is the average tuple width in bytes
1879 : : * 'comparison_cost' is the extra cost per comparison, if any
1880 : : * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
1881 : : * 'limit_tuples' is the bound on the number of output tuples; -1 if no bound
1882 : : */
1883 : : static void
559 1884 : 690540 : cost_tuplesort(Cost *startup_cost, Cost *run_cost,
1885 : : double tuples, int width,
1886 : : Cost comparison_cost, int sort_mem,
1887 : : double limit_tuples)
1888 : : {
6190 1889 : 690540 : double input_bytes = relation_byte_size(tuples, width);
1890 : : double output_bytes;
1891 : : double output_tuples;
4938 1892 : 690540 : long sort_mem_bytes = sort_mem * 1024L;
1893 : :
1894 : : /*
1895 : : * We want to be sure the cost of a sort is never estimated as zero, even
1896 : : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
1897 : : */
8862 1898 [ + + ]: 690540 : if (tuples < 2.0)
1899 : 225254 : tuples = 2.0;
1900 : :
1901 : : /* Include the default cost-per-comparison */
559 1902 : 690540 : comparison_cost += 2.0 * cpu_operator_cost;
1903 : :
1904 : : /* Do we have a useful LIMIT? */
6190 1905 [ + + + + ]: 690540 : if (limit_tuples > 0 && limit_tuples < tuples)
1906 : : {
1907 : 870 : output_tuples = limit_tuples;
1908 : 870 : output_bytes = relation_byte_size(output_tuples, width);
1909 : : }
1910 : : else
1911 : : {
1912 : 689670 : output_tuples = tuples;
1913 : 689670 : output_bytes = input_bytes;
1914 : : }
1915 : :
4938 1916 [ + + ]: 690540 : if (output_bytes > sort_mem_bytes)
1917 : : {
1918 : : /*
1919 : : * We'll have to use a disk-based sort of all the tuples
1920 : : */
6190 1921 : 8431 : double npages = ceil(input_bytes / BLCKSZ);
2928 rhaas@postgresql.org 1922 : 8431 : double nruns = input_bytes / sort_mem_bytes;
4938 tgl@sss.pgh.pa.us 1923 : 8431 : double mergeorder = tuplesort_merge_order(sort_mem_bytes);
1924 : : double log_runs;
1925 : : double npageaccesses;
1926 : :
1927 : : /*
1928 : : * CPU costs
1929 : : *
1930 : : * Assume about N log2 N comparisons
1931 : : */
559 1932 : 8431 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1933 : :
1934 : : /* Disk costs */
1935 : :
1936 : : /* Compute logM(r) as log(r) / log(M) */
6629 1937 [ + + ]: 8431 : if (nruns > mergeorder)
1938 : 2219 : log_runs = ceil(log(nruns) / log(mergeorder));
1939 : : else
8862 1940 : 6212 : log_runs = 1.0;
8825 1941 : 8431 : npageaccesses = 2.0 * npages * log_runs;
1942 : : /* Assume 3/4ths of accesses are sequential, 1/4th are not */
1469 tomas.vondra@postgre 1943 : 8431 : *startup_cost += npageaccesses *
6523 tgl@sss.pgh.pa.us 1944 : 8431 : (seq_page_cost * 0.75 + random_page_cost * 0.25);
1945 : : }
4938 1946 [ + + - + ]: 682109 : else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
1947 : : {
1948 : : /*
1949 : : * We'll use a bounded heap-sort keeping just K tuples in memory, for
1950 : : * a total number of tuple comparisons of N log2 K; but the constant
1951 : : * factor is a bit higher than for quicksort. Tweak it so that the
1952 : : * cost curve is continuous at the crossover point.
1953 : : */
559 1954 : 646 : *startup_cost = comparison_cost * tuples * LOG2(2.0 * output_tuples);
1955 : : }
1956 : : else
1957 : : {
1958 : : /* We'll use plain quicksort on all the input tuples */
1959 : 681463 : *startup_cost = comparison_cost * tuples * LOG2(tuples);
1960 : : }
1961 : :
1962 : : /*
1963 : : * Also charge a small amount (arbitrarily set equal to operator cost) per
1964 : : * extracted tuple. We don't charge cpu_tuple_cost because a Sort node
1965 : : * doesn't do qual-checking or projection, so it has less overhead than
1966 : : * most plan nodes. Note it's correct to use tuples not output_tuples
1967 : : * here --- the upper LIMIT will pro-rate the run cost so we'd be double
1968 : : * counting the LIMIT otherwise.
1969 : : */
1469 tomas.vondra@postgre 1970 : 690540 : *run_cost = cpu_operator_cost * tuples;
1971 : 690540 : }
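 : :
 : : /*
 : :  * Worked example of the disk-sort branch (an illustrative sketch):
 : :  * with input_bytes = 80MB and sort_mem_bytes = 4MB, nruns = 20.  If
 : :  * tuplesort_merge_order() were to return a merge order of 6, then
 : :  *		log_runs = ceil(log(20) / log(6)) = 2 merge passes,
 : :  *		npageaccesses = 2.0 * 10240 * 2 = 40960		(8kB pages)
 : :  * and the disk charge is 40960 * (1.0 * 0.75 + 4.0 * 0.25) = 71680
 : :  * at the stock page-cost defaults.
 : :  */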
1972 : :
1973 : : /*
1974 : : * cost_incremental_sort
1975 : : * Determines and returns the cost of sorting a relation incrementally, when
1976 : : * the input path is presorted by a prefix of the pathkeys.
1977 : : *
1978 : : * 'presorted_keys' is the number of leading pathkeys by which the input path
1979 : : * is sorted.
1980 : : *
1981 : : * We estimate the number of groups into which the relation is divided by the
1982 : : * leading pathkeys, and then calculate the cost of sorting a single group
1983 : : * with tuplesort using cost_tuplesort().
1984 : : */
1985 : : void
1986 : 3837 : cost_incremental_sort(Path *path,
1987 : : PlannerInfo *root, List *pathkeys, int presorted_keys,
1988 : : Cost input_startup_cost, Cost input_total_cost,
1989 : : double input_tuples, int width, Cost comparison_cost, int sort_mem,
1990 : : double limit_tuples)
1991 : : {
1992 : : Cost startup_cost,
1993 : : run_cost,
1994 : 3837 : input_run_cost = input_total_cost - input_startup_cost;
1995 : : double group_tuples,
1996 : : input_groups;
1997 : : Cost group_startup_cost,
1998 : : group_run_cost,
1999 : : group_input_run_cost;
2000 : 3837 : List *presortedExprs = NIL;
2001 : : ListCell *l;
1452 2002 : 3837 : bool unknown_varno = false;
2003 : :
485 drowley@postgresql.o 2004 [ + - - + ]: 3837 : Assert(presorted_keys > 0 && presorted_keys < list_length(pathkeys));
2005 : :
2006 : : /*
2007 : : * We want to be sure the cost of a sort is never estimated as zero, even
2008 : : * if passed-in tuple count is zero. Besides, mustn't do log(0)...
2009 : : */
1469 tomas.vondra@postgre 2010 [ + + ]: 3837 : if (input_tuples < 2.0)
2011 : 2364 : input_tuples = 2.0;
2012 : :
2013 : : /* Default estimate of number of groups, capped to one group per row. */
1452 2014 [ + + ]: 3837 : input_groups = Min(input_tuples, DEFAULT_NUM_DISTINCT);
2015 : :
2016 : : /*
2017 : : * Extract presorted keys as list of expressions.
2018 : : *
2019 : : * We need to be careful about Vars containing "varno 0" which might have
2020 : : * been introduced by generate_append_tlist, which would confuse
2021 : : * estimate_num_groups (in fact it'd fail for such expressions). See
2022 : : * recurse_set_operations which has to deal with the same issue.
2023 : : *
2024 : : * Unlike recurse_set_operations we can't access the original target list
2025 : : * here, and even if we could, it's not very clear how useful that would be
2026 : : * for a set operation combining multiple tables. So we simply detect if
2027 : : * there are any expressions with "varno 0" and use the default
2028 : : * DEFAULT_NUM_DISTINCT in that case.
2029 : : *
2030 : : * We might also use either 1.0 (a single group) or input_tuples (each row
2031 : : * being a separate group), pretty much the worst and best case for
2032 : : * incremental sort. But those are extreme cases and using something in
2033 : : * between seems reasonable. Furthermore, generate_append_tlist is used
2034 : : * for set operations, which are likely to produce mostly unique output
2035 : : * anyway - from that standpoint the DEFAULT_NUM_DISTINCT is defensive
2036 : : * while maintaining lower startup cost.
2037 : : */
1469 2038 [ + - + - + - ]: 3906 : foreach(l, pathkeys)
2039 : : {
2040 : 3906 : PathKey *key = (PathKey *) lfirst(l);
2041 : 3906 : EquivalenceMember *member = (EquivalenceMember *)
331 tgl@sss.pgh.pa.us 2042 : 3906 : linitial(key->pk_eclass->ec_members);
2043 : :
2044 : : /*
2045 : : * Check if the expression contains Var with "varno 0" so that we
2046 : : * don't call estimate_num_groups in that case.
2047 : : */
1179 2048 [ + + ]: 3906 : if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
2049 : : {
1452 tomas.vondra@postgre 2050 : 5 : unknown_varno = true;
2051 : 5 : break;
2052 : : }
2053 : :
2054 : : /* expression not containing any Vars with "varno 0" */
1469 2055 : 3901 : presortedExprs = lappend(presortedExprs, member->em_expr);
2056 : :
485 drowley@postgresql.o 2057 [ + + ]: 3901 : if (foreach_current_index(l) + 1 >= presorted_keys)
1469 tomas.vondra@postgre 2058 : 3832 : break;
2059 : : }
2060 : :
2061 : : /* Estimate the number of groups with equal presorted keys. */
1452 2062 [ + + ]: 3837 : if (!unknown_varno)
1111 drowley@postgresql.o 2063 : 3832 : input_groups = estimate_num_groups(root, presortedExprs, input_tuples,
2064 : : NULL, NULL);
2065 : :
1469 tomas.vondra@postgre 2066 : 3837 : group_tuples = input_tuples / input_groups;
2067 : 3837 : group_input_run_cost = input_run_cost / input_groups;
2068 : :
2069 : : /*
2070 : : * Estimate the average cost of sorting of one group where presorted keys
2071 : : * are equal.
2072 : : */
559 tgl@sss.pgh.pa.us 2073 : 3837 : cost_tuplesort(&group_startup_cost, &group_run_cost,
2074 : : group_tuples, width, comparison_cost, sort_mem,
2075 : : limit_tuples);
2076 : :
2077 : : /*
2078 : : * Startup cost of incremental sort is the startup cost of its first group
2079 : : * plus the cost of its input.
2080 : : */
485 drowley@postgresql.o 2081 : 3837 : startup_cost = group_startup_cost + input_startup_cost +
2082 : : group_input_run_cost;
2083 : :
2084 : : /*
2085 : : * After we started producing tuples from the first group, the cost of
2086 : : * producing all the tuples is given by the cost to finish processing this
2087 : : * group, plus the total cost to process the remaining groups, plus the
2088 : : * remaining cost of input.
2089 : : */
2090 : 3837 : run_cost = group_run_cost + (group_run_cost + group_startup_cost) *
2091 : 3837 : (input_groups - 1) + group_input_run_cost * (input_groups - 1);
2092 : :
2093 : : /*
2094 : : * Incremental sort adds some overhead by itself. Firstly, it has to
2095 : : * detect the sort groups. This is roughly equal to one extra copy and
2096 : : * comparison per tuple.
2097 : : */
1469 tomas.vondra@postgre 2098 : 3837 : run_cost += (cpu_tuple_cost + comparison_cost) * input_tuples;
2099 : :
2100 : : /*
2101 : : * Additionally, we charge double cpu_tuple_cost for each input group to
2102 : : * account for the tuplesort_reset that's performed after each group.
2103 : : */
2104 : 3837 : run_cost += 2.0 * cpu_tuple_cost * input_groups;
2105 : :
2106 : 3837 : path->rows = input_tuples;
2107 : 3837 : path->startup_cost = startup_cost;
2108 : 3837 : path->total_cost = startup_cost + run_cost;
2109 : 3837 : }
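 : :
 : : /*
 : :  * Worked example (an illustrative sketch): with input_tuples = 10000
 : :  * and input_groups = 100, each tuplesort handles group_tuples = 100
 : :  * rows, so startup_cost covers sorting only the first group (plus
 : :  * 1/100th of the input run cost); the other 99 groups are paid for in
 : :  * run_cost, along with the per-tuple group-detection overhead and the
 : :  * 2.0 * cpu_tuple_cost * 100 = 2.0 charge for the tuplesort_reset
 : :  * calls, at the stock cpu_tuple_cost of 0.01.
 : :  */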
2110 : :
2111 : : /*
2112 : : * cost_sort
2113 : : * Determines and returns the cost of sorting a relation, including
2114 : : * the cost of reading the input data.
2115 : : *
2116 : : * NOTE: some callers currently pass NIL for pathkeys because they
2117 : : * can't conveniently supply the sort keys. Since this routine doesn't
2118 : : * currently do anything with pathkeys anyway, that doesn't matter...
2119 : : * but if it ever does, it should react gracefully to lack of key data.
2120 : : * (Actually, the thing we'd most likely be interested in is just the number
2121 : : * of sort keys, which all callers *could* supply.)
2122 : : */
2123 : : void
2124 : 686703 : cost_sort(Path *path, PlannerInfo *root,
2125 : : List *pathkeys, Cost input_cost, double tuples, int width,
2126 : : Cost comparison_cost, int sort_mem,
2127 : : double limit_tuples)
2128 : :
2129 : : {
2130 : : Cost startup_cost;
2131 : : Cost run_cost;
2132 : :
559 tgl@sss.pgh.pa.us 2133 : 686703 : cost_tuplesort(&startup_cost, &run_cost,
2134 : : tuples, width,
2135 : : comparison_cost, sort_mem,
2136 : : limit_tuples);
2137 : :
1469 tomas.vondra@postgre 2138 [ + + ]: 686703 : if (!enable_sort)
2139 : 641 : startup_cost += disable_cost;
2140 : :
2141 : 686703 : startup_cost += input_cost;
2142 : :
2143 : 686703 : path->rows = tuples;
8825 tgl@sss.pgh.pa.us 2144 : 686703 : path->startup_cost = startup_cost;
2145 : 686703 : path->total_cost = startup_cost + run_cost;
10141 scrappy@hub.org 2146 : 686703 : }
2147 : :
2148 : : /*
2149 : : * append_nonpartial_cost
2150 : : * Estimate the cost of the non-partial paths in a Parallel Append.
2151 : : * The non-partial paths are assumed to be the first "numpaths" paths
2152 : : * from the subpaths list, and to be in order of decreasing cost.
2153 : : */
2154 : : static Cost
2322 rhaas@postgresql.org 2155 : 7789 : append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
2156 : : {
2157 : : Cost *costarr;
2158 : : int arrlen;
2159 : : ListCell *l;
2160 : : ListCell *cell;
2161 : : int path_index;
2162 : : int min_index;
2163 : : int max_index;
2164 : :
2165 [ + + ]: 7789 : if (numpaths == 0)
2166 : 7022 : return 0;
2167 : :
2168 : : /*
2169 : : * Array length is number of workers or number of relevant paths,
2170 : : * whichever is less.
2171 : : */
2172 : 767 : arrlen = Min(parallel_workers, numpaths);
2173 : 767 : costarr = (Cost *) palloc(sizeof(Cost) * arrlen);
2174 : :
2175 : : /* The first few paths will each be claimed by a different worker. */
2176 : 767 : path_index = 0;
2177 [ + - + + + + ]: 2073 : foreach(cell, subpaths)
2178 : : {
2179 : 1719 : Path *subpath = (Path *) lfirst(cell);
2180 : :
2181 [ + + ]: 1719 : if (path_index == arrlen)
2182 : 413 : break;
2183 : 1306 : costarr[path_index++] = subpath->total_cost;
2184 : : }
2185 : :
2186 : : /*
2187 : : * Since subpaths are sorted by decreasing cost, the last one will have
2188 : : * the minimum cost.
2189 : : */
2190 : 767 : min_index = arrlen - 1;
2191 : :
2192 : : /*
2193 : : * For each of the remaining subpaths, add its cost to the array element
2194 : : * with minimum cost.
2195 : : */
1735 tgl@sss.pgh.pa.us 2196 [ + - + + + + ]: 1008 : for_each_cell(l, subpaths, cell)
2197 : : {
2322 rhaas@postgresql.org 2198 : 505 : Path *subpath = (Path *) lfirst(l);
2199 : :
2200 : : /* Consider only the non-partial paths */
2201 [ + + ]: 505 : if (path_index++ == numpaths)
2202 : 264 : break;
2203 : :
2204 : 241 : costarr[min_index] += subpath->total_cost;
2205 : :
2206 : : /* Update the new min cost array index */
599 drowley@postgresql.o 2207 : 241 : min_index = 0;
2208 [ + + ]: 741 : for (int i = 0; i < arrlen; i++)
2209 : : {
2322 rhaas@postgresql.org 2210 [ + + ]: 500 : if (costarr[i] < costarr[min_index])
2211 : 101 : min_index = i;
2212 : : }
2213 : : }
2214 : :
2215 : : /* Return the highest cost from the array */
599 drowley@postgresql.o 2216 : 767 : max_index = 0;
2217 [ + + ]: 2073 : for (int i = 0; i < arrlen; i++)
2218 : : {
2322 rhaas@postgresql.org 2219 [ + + ]: 1306 : if (costarr[i] > costarr[max_index])
2220 : 93 : max_index = i;
2221 : : }
2222 : :
2223 : 767 : return costarr[max_index];
2224 : : }
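 : :
 : : /*
 : :  * Worked example (an illustrative sketch): four non-partial subpaths
 : :  * with costs {30, 20, 10, 8} and parallel_workers = 3 give arrlen = 3
 : :  * and costarr = {30, 20, 10}; the leftover path's cost is added to the
 : :  * cheapest slot, making costarr = {30, 20, 18}, and the returned
 : :  * estimate is the busiest worker's total, 30.
 : :  */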
2225 : :
2226 : : /*
2227 : : * cost_append
2228 : : * Determines and returns the cost of an Append node.
2229 : : */
2230 : : void
559 tgl@sss.pgh.pa.us 2231 : 23465 : cost_append(AppendPath *apath)
2232 : : {
2233 : : ListCell *l;
2234 : :
2322 rhaas@postgresql.org 2235 : 23465 : apath->path.startup_cost = 0;
2236 : 23465 : apath->path.total_cost = 0;
1836 tgl@sss.pgh.pa.us 2237 : 23465 : apath->path.rows = 0;
2238 : :
2322 rhaas@postgresql.org 2239 [ + + ]: 23465 : if (apath->subpaths == NIL)
2240 : 765 : return;
2241 : :
2242 [ + + ]: 22700 : if (!apath->path.parallel_aware)
2243 : : {
1836 tgl@sss.pgh.pa.us 2244 : 14911 : List *pathkeys = apath->path.pathkeys;
2245 : :
2246 [ + + ]: 14911 : if (pathkeys == NIL)
2247 : : {
557 drowley@postgresql.o 2248 : 13911 : Path *firstsubpath = (Path *) linitial(apath->subpaths);
2249 : :
2250 : : /*
2251 : : * For an unordered, non-parallel-aware Append we take the startup
2252 : : * cost as the startup cost of the first subpath.
2253 : : */
2254 : 13911 : apath->path.startup_cost = firstsubpath->startup_cost;
2255 : :
2256 : : /* Compute rows and costs as sums of subplan rows and costs. */
1836 tgl@sss.pgh.pa.us 2257 [ + - + + + + ]: 55069 : foreach(l, apath->subpaths)
2258 : : {
2259 : 41158 : Path *subpath = (Path *) lfirst(l);
2260 : :
2261 : 41158 : apath->path.rows += subpath->rows;
2262 : 41158 : apath->path.total_cost += subpath->total_cost;
2263 : : }
2264 : : }
2265 : : else
2266 : : {
2267 : : /*
2268 : : * For an ordered, non-parallel-aware Append we take the startup
2269 : : * cost as the sum of the subpath startup costs. This ensures
2270 : : * that we don't underestimate the startup cost when a query's
2271 : : * LIMIT is such that several of the children have to be run to
2272 : : * satisfy it. This might be overkill --- another plausible hack
2273 : : * would be to take the Append's startup cost as the maximum of
2274 : : * the child startup costs. But we don't want to risk believing
2275 : : * that an ORDER BY LIMIT query can be satisfied at small cost
2276 : : * when the first child has small startup cost but later ones
2277 : : * don't. (If we had the ability to deal with nonlinear cost
2278 : : * interpolation for partial retrievals, we would not need to be
2279 : : * so conservative about this.)
2280 : : *
2281 : : * This case is also different from the above in that we have to
2282 : : * account for possibly injecting sorts into subpaths that aren't
2283 : : * natively ordered.
2284 : : */
2285 [ + - + + + + ]: 3906 : foreach(l, apath->subpaths)
2286 : : {
2287 : 2906 : Path *subpath = (Path *) lfirst(l);
2288 : : Path sort_path; /* dummy for result of cost_sort */
2289 : :
2290 [ + + ]: 2906 : if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
2291 : : {
2292 : : /*
2293 : : * We'll need to insert a Sort node, so include costs for
2294 : : * that. We can use the parent's LIMIT if any, since we
2295 : : * certainly won't pull more than that many tuples from
2296 : : * any child.
2297 : : */
2298 : 22 : cost_sort(&sort_path,
2299 : : NULL, /* doesn't currently need root */
2300 : : pathkeys,
2301 : : subpath->total_cost,
2302 : : subpath->rows,
2303 : 22 : subpath->pathtarget->width,
2304 : : 0.0,
2305 : : work_mem,
2306 : : apath->limit_tuples);
2307 : 22 : subpath = &sort_path;
2308 : : }
2309 : :
2310 : 2906 : apath->path.rows += subpath->rows;
2311 : 2906 : apath->path.startup_cost += subpath->startup_cost;
2312 : 2906 : apath->path.total_cost += subpath->total_cost;
2313 : : }
2314 : : }
2315 : : }
2316 : : else /* parallel-aware */
2317 : : {
2322 rhaas@postgresql.org 2318 : 7789 : int i = 0;
2319 : 7789 : double parallel_divisor = get_parallel_divisor(&apath->path);
2320 : :
2321 : : /* Parallel-aware Append never produces ordered output. */
1836 tgl@sss.pgh.pa.us 2322 [ - + ]: 7789 : Assert(apath->path.pathkeys == NIL);
2323 : :
2324 : : /* Calculate startup cost. */
2322 rhaas@postgresql.org 2325 [ + - + + + + ]: 31789 : foreach(l, apath->subpaths)
2326 : : {
2327 : 24000 : Path *subpath = (Path *) lfirst(l);
2328 : :
2329 : : /*
2330 : : * Append will start returning tuples when the child node having
2331 : : * lowest startup cost is done setting up. We consider only the
2332 : : * first few subplans that immediately get a worker assigned.
2333 : : */
2334 [ + + ]: 24000 : if (i == 0)
2335 : 7789 : apath->path.startup_cost = subpath->startup_cost;
2336 [ + + ]: 16211 : else if (i < apath->path.parallel_workers)
2337 [ + + ]: 7522 : apath->path.startup_cost = Min(apath->path.startup_cost,
2338 : : subpath->startup_cost);
2339 : :
2340 : : /*
2341 : : * Apply parallel divisor to subpaths. Scale the number of rows
2342 : : * for each partial subpath based on the ratio of the parallel
2343 : : * divisor originally used for the subpath to the one we adopted.
2344 : : * Also add the cost of partial paths to the total cost, but
2345 : : * ignore non-partial paths for now.
2346 : : */
2347 [ + + ]: 24000 : if (i < apath->first_partial_path)
2348 : 1547 : apath->path.rows += subpath->rows / parallel_divisor;
2349 : : else
2350 : : {
2351 : : double subpath_parallel_divisor;
2352 : :
2292 2353 : 22453 : subpath_parallel_divisor = get_parallel_divisor(subpath);
2354 : 22453 : apath->path.rows += subpath->rows * (subpath_parallel_divisor /
2355 : : parallel_divisor);
2322 2356 : 22453 : apath->path.total_cost += subpath->total_cost;
2357 : : }
2358 : :
2292 2359 : 24000 : apath->path.rows = clamp_row_est(apath->path.rows);
2360 : :
2322 2361 : 24000 : i++;
2362 : : }
2363 : :
2364 : : /* Add cost for non-partial subpaths. */
2365 : 7789 : apath->path.total_cost +=
2366 : 7789 : append_nonpartial_cost(apath->subpaths,
2367 : : apath->first_partial_path,
2368 : : apath->path.parallel_workers);
2369 : : }
2370 : :
2371 : : /*
2372 : : * Although Append does not do any selection or projection, it's not free;
2373 : : * add a small per-tuple overhead.
2374 : : */
2244 2375 : 22700 : apath->path.total_cost +=
2376 : 22700 : cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * apath->path.rows;
2377 : : }
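 : :
 : : /*
 : :  * Illustrative note: at the stock cpu_tuple_cost of 0.01, with
 : :  * APPEND_CPU_COST_MULTIPLIER at 0.5 the per-tuple overhead charged
 : :  * above comes to 0.005 per output row, half what a qual-checking,
 : :  * projecting plan node would pay.
 : :  */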
2378 : :
2379 : : /*
2380 : : * cost_merge_append
2381 : : * Determines and returns the cost of a MergeAppend node.
2382 : : *
2383 : : * MergeAppend merges several pre-sorted input streams, using a heap that
2384 : : * at any given instant holds the next tuple from each stream. If there
2385 : : * are N streams, we need about N*log2(N) tuple comparisons to construct
2386 : : * the heap at startup, and then for each output tuple, about log2(N)
2387 : : * comparisons to replace the top entry.
2388 : : *
2389 : : * (The effective value of N will drop once some of the input streams are
2390 : : * exhausted, but it seems unlikely to be worth trying to account for that.)
2391 : : *
2392 : : * The heap is never spilled to disk, since we assume N is not very large.
2393 : : * So this is much simpler than cost_sort.
2394 : : *
2395 : : * As in cost_sort, we charge two operator evals per tuple comparison.
2396 : : *
2397 : : * 'pathkeys' is a list of sort keys
2398 : : * 'n_streams' is the number of input streams
2399 : : * 'input_startup_cost' is the sum of the input streams' startup costs
2400 : : * 'input_total_cost' is the sum of the input streams' total costs
2401 : : * 'tuples' is the number of tuples in all the streams
2402 : : */
2403 : : void
4931 tgl@sss.pgh.pa.us 2404 : 1888 : cost_merge_append(Path *path, PlannerInfo *root,
2405 : : List *pathkeys, int n_streams,
2406 : : Cost input_startup_cost, Cost input_total_cost,
2407 : : double tuples)
2408 : : {
2409 : 1888 : Cost startup_cost = 0;
2410 : 1888 : Cost run_cost = 0;
2411 : : Cost comparison_cost;
2412 : : double N;
2413 : : double logN;
2414 : :
2415 : : /*
2416 : : * Avoid log(0)...
2417 : : */
2418 [ + - ]: 1888 : N = (n_streams < 2) ? 2.0 : (double) n_streams;
2419 : 1888 : logN = LOG2(N);
2420 : :
2421 : : /* Assumed cost per tuple comparison */
2422 : 1888 : comparison_cost = 2.0 * cpu_operator_cost;
2423 : :
2424 : : /* Heap creation cost */
2425 : 1888 : startup_cost += comparison_cost * N * logN;
2426 : :
2427 : : /* Per-tuple heap maintenance cost */
2717 2428 : 1888 : run_cost += tuples * comparison_cost * logN;
2429 : :
2430 : : /*
2431 : : * Although MergeAppend does not do any selection or projection, it's not
2432 : : * free; add a small per-tuple overhead.
2433 : : */
2244 rhaas@postgresql.org 2434 : 1888 : run_cost += cpu_tuple_cost * APPEND_CPU_COST_MULTIPLIER * tuples;
2435 : :
4931 tgl@sss.pgh.pa.us 2436 : 1888 : path->startup_cost = startup_cost + input_startup_cost;
2437 : 1888 : path->total_cost = startup_cost + run_cost + input_total_cost;
2438 : 1888 : }
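 : :
 : : /*
 : :  * Worked example (an illustrative sketch, cpu_operator_cost = 0.0025,
 : :  * so comparison_cost = 0.005): merging N = 4 streams costs
 : :  *		0.005 * 4 * log2(4) = 0.04
 : :  * to build the heap, then 0.005 * log2(4) = 0.01 per output tuple for
 : :  * heap maintenance, plus the small per-tuple Append overhead.
 : :  */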
2439 : :
2440 : : /*
2441 : : * cost_material
2442 : : * Determines and returns the cost of materializing a relation, including
2443 : : * the cost of reading the input data.
2444 : : *
2445 : : * If the total volume of data to materialize exceeds work_mem, we will need
2446 : : * to write it to disk, so the cost is much higher in that case.
2447 : : *
2448 : : * Note that here we are estimating the costs for the first scan of the
2449 : : * relation, so the materialization is all overhead --- any savings will
2450 : : * occur only on rescan, which is estimated in cost_rescan.
2451 : : */
2452 : : void
7806 2453 : 211131 : cost_material(Path *path,
2454 : : Cost input_startup_cost, Cost input_total_cost,
2455 : : double tuples, int width)
2456 : : {
5328 2457 : 211131 : Cost startup_cost = input_startup_cost;
2458 : 211131 : Cost run_cost = input_total_cost - input_startup_cost;
7806 2459 : 211131 : double nbytes = relation_byte_size(tuples, width);
7376 2460 : 211131 : long work_mem_bytes = work_mem * 1024L;
2461 : :
4461 2462 : 211131 : path->rows = tuples;
2463 : :
2464 : : /*
2465 : : * Whether spilling or not, charge 2x cpu_operator_cost per tuple to
2466 : : * reflect bookkeeping overhead. (This rate must be more than what
2467 : : * cost_rescan charges for materialize, ie, cpu_operator_cost per tuple;
2468 : : * if it is exactly the same then there will be a cost tie between
2469 : : * nestloop with A outer, materialized B inner and nestloop with B outer,
2470 : : * materialized A inner. The extra cost ensures we'll prefer
2471 : : * materializing the smaller rel.) Note that this is normally a good deal
2472 : : * less than cpu_tuple_cost; which is OK because a Material plan node
2473 : : * doesn't do qual-checking or projection, so it's got less overhead than
2474 : : * most plan nodes.
2475 : : */
5168 2476 : 211131 : run_cost += 2 * cpu_operator_cost * tuples;
2477 : :
2478 : : /*
2479 : : * If we will spill to disk, charge at the rate of seq_page_cost per page.
2480 : : * This cost is assumed to be evenly spread through the plan run phase,
2481 : : * which isn't exactly accurate but our cost model doesn't allow for
2482 : : * nonuniform costs within the run phase.
2483 : : */
7376 2484 [ + + ]: 211131 : if (nbytes > work_mem_bytes)
2485 : : {
7806 2486 : 2165 : double npages = ceil(nbytes / BLCKSZ);
2487 : :
6523 2488 : 2165 : run_cost += seq_page_cost * npages;
2489 : : }
2490 : :
7806 2491 : 211131 : path->startup_cost = startup_cost;
2492 : 211131 : path->total_cost = startup_cost + run_cost;
2493 : 211131 : }
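 : :
 : : /*
 : :  * Worked example (an illustrative sketch, default settings assumed):
 : :  * 100000 tuples of width 100 come to roughly 12MB once per-tuple
 : :  * header overhead is included, which exceeds a 4MB work_mem; run_cost
 : :  * then picks up seq_page_cost for each of the ~1500 8kB pages written,
 : :  * on top of the 2 * 0.0025 * 100000 = 500 bookkeeping charge.
 : :  */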
2494 : :
2495 : : /*
2496 : : * cost_memoize_rescan
2497 : : * Determines the estimated cost of rescanning a Memoize node.
2498 : : *
2499 : : * In order to estimate this, we must gain knowledge of how often we expect to
2500 : : * be called and how many distinct sets of parameters we are likely to be
2501 : : * called with. If we expect a good cache hit ratio, then we can set our
2502 : : * costs to account for that hit ratio, plus a little bit of cost for the
2503 : : * caching itself. Caching will not work out well if we expect to be called
2504 : : * with too many distinct parameter values. The worst-case here is that we
2505 : : * never see any parameter value twice, in which case we'd never get a cache
2506 : : * hit and caching would be a complete waste of effort.
2507 : : */
2508 : : static void
1005 drowley@postgresql.o 2509 : 112582 : cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
2510 : : Cost *rescan_startup_cost, Cost *rescan_total_cost)
2511 : : {
2512 : : EstimationInfo estinfo;
2513 : : ListCell *lc;
2514 : 112582 : Cost input_startup_cost = mpath->subpath->startup_cost;
2515 : 112582 : Cost input_total_cost = mpath->subpath->total_cost;
2516 : 112582 : double tuples = mpath->subpath->rows;
2517 : 112582 : double calls = mpath->calls;
2518 : 112582 : int width = mpath->subpath->pathtarget->width;
2519 : :
2520 : : double hash_mem_bytes;
2521 : : double est_entry_bytes;
2522 : : double est_cache_entries;
2523 : : double ndistinct;
2524 : : double evict_ratio;
2525 : : double hit_ratio;
2526 : : Cost startup_cost;
2527 : : Cost total_cost;
2528 : :
2529 : : /* available cache space */
994 tgl@sss.pgh.pa.us 2530 : 112582 : hash_mem_bytes = get_hash_memory_limit();
2531 : :
2532 : : /*
2533 : : * Set the number of bytes each cache entry should consume in the cache.
2534 : : * To provide us with better estimates of how many cache entries we can
2535 : : * store at once, we make a call to the executor here to ask it what
2536 : : * memory overheads there are for a single cache entry.
2537 : : */
1108 drowley@postgresql.o 2538 : 112582 : est_entry_bytes = relation_byte_size(tuples, width) +
2539 : 112582 : ExecEstimateCacheEntryOverheadBytes(tuples);
2540 : :
2541 : : /* include the estimated width for the cache keys */
391 2542 [ + - + + + + ]: 238034 : foreach(lc, mpath->param_exprs)
2543 : 125452 : est_entry_bytes += get_expr_width(root, (Node *) lfirst(lc));
2544 : :
2545 : : /* estimate on the upper limit of cache entries we can hold at once */
1108 2546 : 112582 : est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
2547 : :
2548 : : /* estimate on the distinct number of parameter values */
1005 2549 : 112582 : ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
2550 : : &estinfo);
2551 : :
2552 : : /*
2553 : : * When the estimation fell back on using a default value, it's a bit too
2554 : : * risky to assume that it's ok to use a Memoize node. The use of a
2555 : : * default could cause us to use a Memoize node when it's really
2556 : : * inappropriate to do so. If we see that this has been done, then we'll
2557 : : * assume that every call will have unique parameters, which will almost
2558 : : * certainly mean a MemoizePath will never survive add_path().
2559 : : */
1108 2560 [ + + ]: 112582 : if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
2561 : 5208 : ndistinct = calls;
2562 : :
2563 : : /*
2564 : : * Since we've already estimated the maximum number of entries we can
2565 : : * store at once and know the estimated number of distinct values we'll be
2566 : : * called with, we'll take this opportunity to set the path's est_entries.
2567 : : * This will ultimately determine the hash table size that the executor
2568 : : * will use. If we leave this at zero, the executor will just choose the
2569 : : * size itself. Really this is not the right place to do this, but it's
2570 : : * convenient since everything is already calculated.
2571 : : */
1005 2572 [ + + + - + + ]: 112582 : mpath->est_entries = Min(Min(ndistinct, est_cache_entries),
2573 : : PG_UINT32_MAX);
2574 : :
2575 : : /*
2576 : : * When the number of distinct parameter values is above the amount we can
2577 : : * store in the cache, then we'll have to evict some entries from the
2578 : : * cache. This is not free. Here we estimate how often we'll incur the
2579 : : * cost of that eviction.
2580 : : */
1108 2581 [ + + ]: 112582 : evict_ratio = 1.0 - Min(est_cache_entries, ndistinct) / ndistinct;
2582 : :
2583 : : /*
2584 : : * In order to estimate how costly a single scan will be, we need to
2585 : : * attempt to estimate what the cache hit ratio will be. To do that we
2586 : : * must look at how many scans are estimated in total for this node and
2587 : : * how many of those scans we expect to get a cache hit.
2588 : : */
389 2589 : 225164 : hit_ratio = ((calls - ndistinct) / calls) *
2590 [ + + ]: 112582 : (est_cache_entries / Max(ndistinct, est_cache_entries));
2591 : :
2592 [ + - - + ]: 112582 : Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
2593 : :
2594 : : /*
2595 : : * Set the total_cost accounting for the expected cache hit ratio. We
2596 : : * also add on a cpu_operator_cost to account for a cache lookup. This
2597 : : * will happen regardless of whether it's a cache hit or not.
2598 : : */
1108 2599 : 112582 : total_cost = input_total_cost * (1.0 - hit_ratio) + cpu_operator_cost;
2600 : :
2601 : : /* Now adjust the total cost to account for cache evictions */
2602 : :
2603 : : /* Charge a cpu_tuple_cost for evicting the actual cache entry */
2604 : 112582 : total_cost += cpu_tuple_cost * evict_ratio;
2605 : :
2606 : : /*
2607 : : * Charge a 10th of cpu_operator_cost to evict every tuple in that entry.
2608 : : * The per-tuple eviction is really just a pfree, so charging a whole
2609 : : * cpu_operator_cost seems a little excessive.
2610 : : */
2611 : 112582 : total_cost += cpu_operator_cost / 10.0 * evict_ratio * tuples;
2612 : :
2613 : : /*
2614 : : * Now adjust for storing things in the cache, since that's not free
2615 : : * either. Everything must go in the cache. We don't proportion this
2616 : : * over any ratio, just apply it once for the scan. We charge a
2617 : : * cpu_tuple_cost for the creation of the cache entry and also a
2618 : : * cpu_operator_cost for each tuple we expect to cache.
2619 : : */
2620 : 112582 : total_cost += cpu_tuple_cost + cpu_operator_cost * tuples;
2621 : :
2622 : : /*
2623 : : * Getting the first row must also be proportioned according to the
2624 : : * expected cache hit ratio.
2625 : : */
2626 : 112582 : startup_cost = input_startup_cost * (1.0 - hit_ratio);
2627 : :
2628 : : /*
2629 : : * Additionally we charge a cpu_tuple_cost to account for cache lookups,
2630 : : * which we'll do regardless of whether it was a cache hit or not.
2631 : : */
2632 : 112582 : startup_cost += cpu_tuple_cost;
2633 : :
2634 : 112582 : *rescan_startup_cost = startup_cost;
2635 : 112582 : *rescan_total_cost = total_cost;
2636 : 112582 : }
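 : :
 : : /*
 : :  * Worked example (an illustrative sketch): with calls = 1000,
 : :  * ndistinct = 100, and est_cache_entries >= 100, no evictions are
 : :  * expected and
 : :  *		hit_ratio = ((1000 - 100) / 1000) * 1.0 = 0.9,
 : :  * so each rescan is charged only 10% of the subpath's total cost plus
 : :  * the fixed lookup and caching charges.
 : :  */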
2637 : :
2638 : : /*
2639 : : * cost_agg
2640 : : * Determines and returns the cost of performing an Agg plan node,
2641 : : * including the cost of its input.
2642 : : *
2643 : : * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
2644 : : * we are using a hashed Agg node just to do grouping).
2645 : : *
2646 : : * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
2647 : : * are for appropriately-sorted input.
2648 : : */
2649 : : void
6888 tgl@sss.pgh.pa.us 2650 : 30967 : cost_agg(Path *path, PlannerInfo *root,
2651 : : AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
2652 : : int numGroupCols, double numGroups,
2653 : : List *quals,
2654 : : Cost input_startup_cost, Cost input_total_cost,
2655 : : double input_tuples, double input_width)
2656 : : {
2657 : : double output_tuples;
2658 : : Cost startup_cost;
2659 : : Cost total_cost;
2660 : : AggClauseCosts dummy_aggcosts;
2661 : :
2662 : : /* Use all-zero per-aggregate costs if NULL is passed */
4739 2663 [ + + ]: 30967 : if (aggcosts == NULL)
2664 : : {
2665 [ - + ]: 4676 : Assert(aggstrategy == AGG_HASHED);
638 peter@eisentraut.org 2666 [ + - + - : 28056 : MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
+ - + - +
+ ]
4739 tgl@sss.pgh.pa.us 2667 : 4676 : aggcosts = &dummy_aggcosts;
2668 : : }
2669 : :
2670 : : /*
2671 : : * The transCost.per_tuple component of aggcosts should be charged once
2672 : : * per input tuple, corresponding to the costs of evaluating the aggregate
2673 : : * transfns and their input expressions. The finalCost.per_tuple component
2674 : : * is charged once per output tuple, corresponding to the costs of
2675 : : * evaluating the finalfns. Startup costs are of course charged but once.
2676 : : *
2677 : : * If we are grouping, we charge an additional cpu_operator_cost per
2678 : : * grouping column per input tuple for grouping comparisons.
2679 : : *
2680 : : * We will produce a single output tuple if not grouping, and a tuple per
2681 : : * group otherwise. We charge cpu_tuple_cost for each output tuple.
2682 : : *
2683 : : * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
2684 : : * same total CPU cost, but AGG_SORTED has lower startup cost. If the
2685 : : * input path is already sorted appropriately, AGG_SORTED should be
2686 : : * preferred (since it has no risk of memory overflow). This will happen
2687 : : * as long as the computed total costs are indeed exactly equal --- but if
2688 : : * there's roundoff error we might do the wrong thing. So be sure that
2689 : : * the computations below form the same intermediate values in the same
2690 : : * order.
2691 : : */
7815 2692 [ + + ]: 30967 : if (aggstrategy == AGG_PLAIN)
2693 : : {
2694 : 17222 : startup_cost = input_total_cost;
4739 2695 : 17222 : startup_cost += aggcosts->transCost.startup;
2696 : 17222 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
1891 2697 : 17222 : startup_cost += aggcosts->finalCost.startup;
2698 : 17222 : startup_cost += aggcosts->finalCost.per_tuple;
2699 : : /* we aren't grouping */
6805 2700 : 17222 : total_cost = startup_cost + cpu_tuple_cost;
4461 2701 : 17222 : output_tuples = 1;
2702 : : }
2575 rhodiumtoad@postgres 2703 [ + + + + ]: 13745 : else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
2704 : : {
2705 : : /* Here we are able to deliver output on-the-fly */
7815 tgl@sss.pgh.pa.us 2706 : 5204 : startup_cost = input_startup_cost;
2707 : 5204 : total_cost = input_total_cost;
2575 rhodiumtoad@postgres 2708 [ + + + + ]: 5204 : if (aggstrategy == AGG_MIXED && !enable_hashagg)
2709 : : {
2710 : 228 : startup_cost += disable_cost;
2711 : 228 : total_cost += disable_cost;
2712 : : }
2713 : : /* calcs phrased this way to match HASHED case, see note above */
4739 tgl@sss.pgh.pa.us 2714 : 5204 : total_cost += aggcosts->transCost.startup;
2715 : 5204 : total_cost += aggcosts->transCost.per_tuple * input_tuples;
2716 : 5204 : total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1891 2717 : 5204 : total_cost += aggcosts->finalCost.startup;
2718 : 5204 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
6805 2719 : 5204 : total_cost += cpu_tuple_cost * numGroups;
4461 2720 : 5204 : output_tuples = numGroups;
2721 : : }
2722 : : else
2723 : : {
2724 : : /* must be AGG_HASHED */
7815 2725 : 8541 : startup_cost = input_total_cost;
2946 rhaas@postgresql.org 2726 [ + + ]: 8541 : if (!enable_hashagg)
2727 : 741 : startup_cost += disable_cost;
4739 tgl@sss.pgh.pa.us 2728 : 8541 : startup_cost += aggcosts->transCost.startup;
2729 : 8541 : startup_cost += aggcosts->transCost.per_tuple * input_tuples;
2730 : : /* cost of computing hash value */
2731 : 8541 : startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
1891 2732 : 8541 : startup_cost += aggcosts->finalCost.startup;
2733 : :
7815 2734 : 8541 : total_cost = startup_cost;
1891 2735 : 8541 : total_cost += aggcosts->finalCost.per_tuple * numGroups;
2736 : : /* cost of retrieving from hash table */
6805 2737 : 8541 : total_cost += cpu_tuple_cost * numGroups;
4461 2738 : 8541 : output_tuples = numGroups;
2739 : : }
2740 : :
2741 : : /*
2742 : : * Add the disk costs of hash aggregation that spills to disk.
2743 : : *
2744 : : * Groups that go into the hash table stay in memory until finalized, so
2745 : : * spilling and reprocessing tuples doesn't incur additional invocations
2746 : : * of transCost or finalCost. Furthermore, the computed hash value is
2747 : : * stored with the spilled tuples, so we don't incur extra invocations of
2748 : : * the hash function.
2749 : : *
2750 : : * Hash Agg begins returning tuples after the first batch is complete.
2751 : : * Accrue writes (spilled tuples) to startup_cost and to total_cost;
2752 : : * accrue reads only to total_cost.
2753 : : */
1488 jdavis@postgresql.or 2754 [ + + + + ]: 30967 : if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
2755 : : {
2756 : : double pages;
1431 tgl@sss.pgh.pa.us 2757 : 8963 : double pages_written = 0.0;
2758 : 8963 : double pages_read = 0.0;
2759 : : double spill_cost;
2760 : : double hashentrysize;
2761 : : double nbatches;
2762 : : Size mem_limit;
2763 : : uint64 ngroups_limit;
2764 : : int num_partitions;
2765 : : int depth;
2766 : :
2767 : : /*
2768 : : * Estimate number of batches based on the computed limits. If less
2769 : : * than or equal to one, all groups are expected to fit in memory;
2770 : : * otherwise we expect to spill.
2771 : : */
1237 heikki.linnakangas@i 2772 : 8963 : hashentrysize = hash_agg_entry_size(list_length(root->aggtransinfos),
2773 : : input_width,
1431 tgl@sss.pgh.pa.us 2774 : 8963 : aggcosts->transitionSpace);
1488 jdavis@postgresql.or 2775 : 8963 : hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
2776 : : &ngroups_limit, &num_partitions);
2777 : :
1431 tgl@sss.pgh.pa.us 2778 [ - + ]: 8963 : nbatches = Max((numGroups * hashentrysize) / mem_limit,
2779 : : numGroups / ngroups_limit);
2780 : :
1478 jdavis@postgresql.or 2781 [ + + ]: 8963 : nbatches = Max(ceil(nbatches), 1.0);
2782 : 8963 : num_partitions = Max(num_partitions, 2);
2783 : :
2784 : : /*
2785 : : * The number of partitions can change at different levels of
2786 : : * recursion, but for the purposes of this calculation assume it stays
2787 : : * constant.
2788 : : */
1431 tgl@sss.pgh.pa.us 2789 : 8963 : depth = ceil(log(nbatches) / log(num_partitions));
2790 : :
2791 : : /*
2792 : : * Estimate number of pages read and written. For each level of
2793 : : * recursion, a tuple must be written and then later read.
2794 : : */
1478 jdavis@postgresql.or 2795 : 8963 : pages = relation_byte_size(input_tuples, input_width) / BLCKSZ;
2796 : 8963 : pages_written = pages_read = pages * depth;
2797 : :
2798 : : /*
2799 : : * HashAgg has somewhat worse IO behavior than Sort on typical
2800 : : * hardware/OS combinations. Account for this with a generic penalty.
2801 : : */
1315 2802 : 8963 : pages_read *= 2.0;
2803 : 8963 : pages_written *= 2.0;
2804 : :
1488 2805 : 8963 : startup_cost += pages_written * random_page_cost;
2806 : 8963 : total_cost += pages_written * random_page_cost;
2807 : 8963 : total_cost += pages_read * seq_page_cost;
2808 : :
2809 : : /* account for CPU cost of spilling a tuple and reading it back */
1315 2810 : 8963 : spill_cost = depth * input_tuples * 2.0 * cpu_tuple_cost;
2811 : 8963 : startup_cost += spill_cost;
2812 : 8963 : total_cost += spill_cost;
2813 : : }
2814 : :
2815 : : /*
2816 : : * If there are quals (HAVING quals), account for their cost and
2817 : : * selectivity.
2818 : : */
2355 tgl@sss.pgh.pa.us 2819 [ + + ]: 30967 : if (quals)
2820 : : {
2821 : : QualCost qual_cost;
2822 : :
2823 : 2094 : cost_qual_eval(&qual_cost, quals, root);
2824 : 2094 : startup_cost += qual_cost.startup;
2825 : 2094 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
2826 : :
2827 : 2094 : output_tuples = clamp_row_est(output_tuples *
2828 : 2094 : clauselist_selectivity(root,
2829 : : quals,
2830 : : 0,
2831 : : JOIN_INNER,
2832 : : NULL));
2833 : : }
2834 : :
4461 2835 : 30967 : path->rows = output_tuples;
7815 2836 : 30967 : path->startup_cost = startup_cost;
2837 : 30967 : path->total_cost = total_cost;
2838 : 30967 : }
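
The spill arithmetic above is easiest to follow with concrete numbers. A minimal sketch (illustrative only, not part of costsize.c; the group count, entry size, memory limit, and partition fan-out are all assumed):

/* recompute the HashAgg batch/recursion-depth estimate for assumed inputs */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double	numGroups = 1000000.0;		/* estimated groups (assumed) */
	double	hashentrysize = 64.0;		/* bytes per hash entry (assumed) */
	double	mem_limit = 4194304.0;		/* ~4MB of work_mem (assumed) */
	double	ngroups_limit = mem_limit / hashentrysize;
	double	num_partitions = 4.0;		/* spill fan-out (assumed) */
	double	nbatches;
	double	depth;

	nbatches = fmax((numGroups * hashentrysize) / mem_limit,
					numGroups / ngroups_limit);
	nbatches = fmax(ceil(nbatches), 1.0);

	/* each recursion level writes out and rereads every input tuple once */
	depth = ceil(log(nbatches) / log(num_partitions));

	printf("nbatches = %.0f, depth = %.0f\n", nbatches, depth);
	return 0;	/* prints nbatches = 16, depth = 2 */
}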
2839 : :
2840 : : /*
2841 : : * get_windowclause_startup_tuples
2842 : : * Estimate how many tuples we'll need to fetch from a WindowAgg's
2843 : : * subnode before we can output the first WindowAgg tuple.
2844 : : *
2845 : : * How many tuples need to be read depends on the WindowClause. For example,
2846 : : * a WindowClause with no PARTITION BY and no ORDER BY requires that all
2847 : : * subnode tuples are read and aggregated before the WindowAgg can output
2848 : : * anything. If there's a PARTITION BY, then we only need to look at tuples
2849 : : * in the first partition. Here we attempt to estimate just how many
2850 : : * 'input_tuples' the WindowAgg will need to read for the given WindowClause
2851 : : * before the first tuple can be output.
2852 : : */
2853 : : static double
254 drowley@postgresql.o 2854 :GNC 1315 : get_windowclause_startup_tuples(PlannerInfo *root, WindowClause *wc,
2855 : : double input_tuples)
2856 : : {
2857 : 1315 : int frameOptions = wc->frameOptions;
2858 : : double partition_tuples;
2859 : : double return_tuples;
2860 : : double peer_tuples;
2861 : :
2862 : : /*
2863 : : * First, figure out how many partitions there are likely to be and set
2864 : : * partition_tuples according to that estimate.
2865 : : */
2866 [ + + ]: 1315 : if (wc->partitionClause != NIL)
2867 : : {
2868 : : double num_partitions;
2869 : 322 : List *partexprs = get_sortgrouplist_exprs(wc->partitionClause,
2870 : 322 : root->parse->targetList);
2871 : :
2872 : 322 : num_partitions = estimate_num_groups(root, partexprs, input_tuples,
2873 : : NULL, NULL);
2874 : 322 : list_free(partexprs);
2875 : :
2876 : 322 : partition_tuples = input_tuples / num_partitions;
2877 : : }
2878 : : else
2879 : : {
2880 : : /* all tuples belong to the same partition */
2881 : 993 : partition_tuples = input_tuples;
2882 : : }
2883 : :
2884 : : /* estimate the number of tuples in each peer group */
2885 [ + + ]: 1315 : if (wc->orderClause != NIL)
2886 : : {
2887 : : double num_groups;
2888 : : List *orderexprs;
2889 : :
2890 : 1105 : orderexprs = get_sortgrouplist_exprs(wc->orderClause,
2891 : 1105 : root->parse->targetList);
2892 : :
2893 : : /* estimate how many peer groups there are in the partition */
2894 : 1105 : num_groups = estimate_num_groups(root, orderexprs,
2895 : : partition_tuples, NULL,
2896 : : NULL);
2897 : 1105 : list_free(orderexprs);
2898 : 1105 : peer_tuples = partition_tuples / num_groups;
2899 : : }
2900 : : else
2901 : : {
2902 : : /* no ORDER BY so only 1 tuple belongs in each peer group */
2903 : 210 : peer_tuples = 1.0;
2904 : : }
2905 : :
2906 [ + + ]: 1315 : if (frameOptions & FRAMEOPTION_END_UNBOUNDED_FOLLOWING)
2907 : : {
2908 : : /* include all partition rows */
2909 : 173 : return_tuples = partition_tuples;
2910 : : }
2911 [ + + ]: 1142 : else if (frameOptions & FRAMEOPTION_END_CURRENT_ROW)
2912 : : {
2913 [ + + ]: 647 : if (frameOptions & FRAMEOPTION_ROWS)
2914 : : {
2915 : : /* just count the current row */
2916 : 288 : return_tuples = 1.0;
2917 : : }
2918 [ + - ]: 359 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
2919 : : {
2920 : : /*
2921 : : * When in RANGE/GROUPS mode, it's more complex. If there's no
2922 : : * ORDER BY, then all rows in the partition are peers, otherwise
2923 : : * we'll need to read the first group of peers.
2924 : : */
2925 [ + + ]: 359 : if (wc->orderClause == NIL)
2926 : 129 : return_tuples = partition_tuples;
2927 : : else
2928 : 230 : return_tuples = peer_tuples;
2929 : : }
2930 : : else
2931 : : {
2932 : : /*
2933 : : * Something new we don't support yet? This needs attention.
2934 : : * We'll just return 1.0 in the meantime.
2935 : : */
254 drowley@postgresql.o 2936 :UNC 0 : Assert(false);
2937 : : return_tuples = 1.0;
2938 : : }
2939 : : }
254 drowley@postgresql.o 2940 [ + + ]:GNC 495 : else if (frameOptions & FRAMEOPTION_END_OFFSET_PRECEDING)
2941 : : {
2942 : : /*
2943 : : * BETWEEN ... AND N PRECEDING will only need to read the WindowAgg's
2944 : : * subnode after N ROWS/RANGES/GROUPS. N can be 0, but not negative,
2945 : : * so we'll just assume only the current row needs to be read to fetch
2946 : : * the first WindowAgg row.
2947 : : */
2948 : 54 : return_tuples = 1.0;
2949 : : }
2950 [ + - ]: 441 : else if (frameOptions & FRAMEOPTION_END_OFFSET_FOLLOWING)
2951 : : {
2952 : 441 : Const *endOffset = (Const *) wc->endOffset;
2953 : : double end_offset_value;
2954 : :
2955 : : /* try to figure out the value specified in the endOffset. */
2956 [ + + ]: 441 : if (IsA(endOffset, Const))
2957 : : {
2958 [ - + ]: 435 : if (endOffset->constisnull)
2959 : : {
2960 : : /*
2961 : : * NULLs are not allowed, but currently, there's no code to
2962 : : * error out if there's a NULL Const. We'll only discover
2963 : : * this during execution. For now, just pretend everything is
2964 : : * fine and assume that just the first row/range/group will be
2965 : : * needed.
2966 : : */
254 drowley@postgresql.o 2967 :UNC 0 : end_offset_value = 1.0;
2968 : : }
2969 : : else
2970 : : {
254 drowley@postgresql.o 2971 [ + + + + ]:GNC 435 : switch (endOffset->consttype)
2972 : : {
2973 : 12 : case INT2OID:
2974 : 12 : end_offset_value =
2975 : 12 : (double) DatumGetInt16(endOffset->constvalue);
2976 : 12 : break;
2977 : 66 : case INT4OID:
2978 : 66 : end_offset_value =
2979 : 66 : (double) DatumGetInt32(endOffset->constvalue);
2980 : 66 : break;
2981 : 186 : case INT8OID:
2982 : 186 : end_offset_value =
2983 : 186 : (double) DatumGetInt64(endOffset->constvalue);
2984 : 186 : break;
2985 : 171 : default:
2986 : 171 : end_offset_value =
2987 : 171 : partition_tuples / peer_tuples *
2988 : : DEFAULT_INEQ_SEL;
2989 : 171 : break;
2990 : : }
2991 : : }
2992 : : }
2993 : : else
2994 : : {
2995 : : /*
2996 : : * When the end bound is not a Const, we'll just have to guess, so
2997 : : * we fall back on DEFAULT_INEQ_SEL.
2998 : : */
2999 : 6 : end_offset_value =
3000 : 6 : partition_tuples / peer_tuples * DEFAULT_INEQ_SEL;
3001 : : }
3002 : :
3003 [ + + ]: 441 : if (frameOptions & FRAMEOPTION_ROWS)
3004 : : {
3005 : : /* include the N FOLLOWING and the current row */
3006 : 111 : return_tuples = end_offset_value + 1.0;
3007 : : }
3008 [ + - ]: 330 : else if (frameOptions & (FRAMEOPTION_RANGE | FRAMEOPTION_GROUPS))
3009 : : {
3010 : : /* include N FOLLOWING ranges/groups and the initial range/group */
3011 : 330 : return_tuples = peer_tuples * (end_offset_value + 1.0);
3012 : : }
3013 : : else
3014 : : {
3015 : : /*
3016 : : * Something new we don't support yet? This needs attention.
3017 : : * We'll just return 1.0 in the meantime.
3018 : : */
254 drowley@postgresql.o 3019 :UNC 0 : Assert(false);
3020 : : return_tuples = 1.0;
3021 : : }
3022 : : }
3023 : : else
3024 : : {
3025 : : /*
3026 : : * Something new we don't support yet? This needs attention. We'll
3027 : : * just return 1.0 in the meantime.
3028 : : */
3029 : 0 : Assert(false);
3030 : : return_tuples = 1.0;
3031 : : }
3032 : :
254 drowley@postgresql.o 3033 [ + + + + ]:GNC 1315 : if (wc->partitionClause != NIL || wc->orderClause != NIL)
3034 : : {
3035 : : /*
3036 : : * Cap the return value to the estimated partition tuples and account
3037 : : * for the extra tuple WindowAgg will need to read to confirm the next
3038 : : * tuple does not belong to the same partition or peer group.
3039 : : */
3040 [ + + ]: 1186 : return_tuples = Min(return_tuples + 1.0, partition_tuples);
3041 : : }
3042 : : else
3043 : : {
3044 : : /*
3045 : : * Cap the return value so it's never higher than the expected tuples
3046 : : * in the partition.
3047 : : */
3048 [ + + ]: 129 : return_tuples = Min(return_tuples, partition_tuples);
3049 : : }
3050 : :
3051 : : /*
3052 : : * We needn't worry about any EXCLUDE options as those only exclude rows
3053 : : * from being aggregated, not from being read from the WindowAgg's
3054 : : * subnode.
3055 : : */
3056 : :
3057 : 1315 : return clamp_row_est(return_tuples);
3058 : : }
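
To make the estimate concrete, here is a minimal sketch for a clause like OVER (PARTITION BY a ORDER BY b RANGE CURRENT ROW) (illustrative only, not part of costsize.c; the row and group counts are assumed stand-ins for estimate_num_groups results):

/* startup-tuple estimate for PARTITION BY + ORDER BY + RANGE CURRENT ROW */
#include <stdio.h>

int
main(void)
{
	double	input_tuples = 10000.0;		/* subnode rows (assumed) */
	double	num_partitions = 50.0;		/* PARTITION BY groups (assumed) */
	double	num_peer_groups = 20.0;		/* peer groups per partition (assumed) */
	double	partition_tuples = input_tuples / num_partitions;
	double	peer_tuples = partition_tuples / num_peer_groups;
	double	return_tuples;

	/* RANGE mode ending at CURRENT ROW must read the first peer group */
	return_tuples = peer_tuples;

	/* plus one extra tuple to detect the partition/peer-group boundary */
	return_tuples += 1.0;
	if (return_tuples > partition_tuples)
		return_tuples = partition_tuples;

	printf("read ~%.0f of %.0f subnode tuples for the first output row\n",
		   return_tuples, input_tuples);
	return 0;	/* prints read ~11 of 10000 ... */
}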
3059 : :
3060 : : /*
3061 : : * cost_windowagg
3062 : : * Determines and returns the cost of performing a WindowAgg plan node,
3063 : : * including the cost of its input.
3064 : : *
3065 : : * Input is assumed already properly sorted.
3066 : : */
3067 : : void
5586 tgl@sss.pgh.pa.us 3068 :CBC 1315 : cost_windowagg(Path *path, PlannerInfo *root,
3069 : : List *windowFuncs, WindowClause *winclause,
3070 : : Cost input_startup_cost, Cost input_total_cost,
3071 : : double input_tuples)
3072 : : {
3073 : : Cost startup_cost;
3074 : : Cost total_cost;
3075 : : double startup_tuples;
3076 : : int numPartCols;
3077 : : int numOrderCols;
3078 : : ListCell *lc;
3079 : :
254 drowley@postgresql.o 3080 :GNC 1315 : numPartCols = list_length(winclause->partitionClause);
3081 : 1315 : numOrderCols = list_length(winclause->orderClause);
3082 : :
5586 tgl@sss.pgh.pa.us 3083 :CBC 1315 : startup_cost = input_startup_cost;
3084 : 1315 : total_cost = input_total_cost;
3085 : :
3086 : : /*
3087 : : * Window functions are assumed to cost their stated execution cost, plus
3088 : : * the cost of evaluating their input expressions, per tuple. Since they
3089 : : * may in fact evaluate their inputs at multiple rows during each cycle,
3090 : : * this could be a drastic underestimate; but without a way to know how
3091 : : * many rows the window function will fetch, it's hard to do better. In
3092 : : * any case, it's a good estimate for all the built-in window functions,
3093 : : * so we'll just do this for now.
3094 : : */
4739 3095 [ + - + + : 2981 : foreach(lc, windowFuncs)
+ + ]
3096 : : {
2561 3097 : 1666 : WindowFunc *wfunc = lfirst_node(WindowFunc, lc);
3098 : : Cost wfunccost;
3099 : : QualCost argcosts;
3100 : :
1891 3101 : 1666 : argcosts.startup = argcosts.per_tuple = 0;
3102 : 1666 : add_function_cost(root, wfunc->winfnoid, (Node *) wfunc,
3103 : : &argcosts);
3104 : 1666 : startup_cost += argcosts.startup;
3105 : 1666 : wfunccost = argcosts.per_tuple;
3106 : :
3107 : : /* also add the input expressions' cost to per-input-row costs */
4739 3108 : 1666 : cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
3109 : 1666 : startup_cost += argcosts.startup;
3110 : 1666 : wfunccost += argcosts.per_tuple;
3111 : :
3112 : : /*
3113 : : * Add the filter's cost to per-input-row costs. XXX We should reduce
3114 : : * input expression costs according to filter selectivity.
3115 : : */
3925 noah@leadboat.com 3116 : 1666 : cost_qual_eval_node(&argcosts, (Node *) wfunc->aggfilter, root);
3117 : 1666 : startup_cost += argcosts.startup;
3118 : 1666 : wfunccost += argcosts.per_tuple;
3119 : :
4739 tgl@sss.pgh.pa.us 3120 : 1666 : total_cost += wfunccost * input_tuples;
3121 : : }
3122 : :
3123 : : /*
3124 : : * We also charge cpu_operator_cost per grouping column per tuple for
3125 : : * grouping comparisons, plus cpu_tuple_cost per tuple for general
3126 : : * overhead.
3127 : : *
3128 : : * XXX this neglects costs of spooling the data to disk when it overflows
3129 : : * work_mem. Sooner or later that should get accounted for.
3130 : : */
3131 : 1315 : total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
5586 3132 : 1315 : total_cost += cpu_tuple_cost * input_tuples;
3133 : :
4461 3134 : 1315 : path->rows = input_tuples;
5586 3135 : 1315 : path->startup_cost = startup_cost;
3136 : 1315 : path->total_cost = total_cost;
3137 : :
3138 : : /*
3139 : : * Also, take into account how many tuples we need to read from the
3140 : : * subnode in order to produce the first tuple from the WindowAgg. To do
3141 : : * this we proportion the run cost (total cost not including startup cost)
3142 : : * over the estimated startup tuples. We already included the startup
3143 : : * cost of the subnode, so we only need to do this when the estimated
3144 : : * startup tuples is above 1.0.
3145 : : */
254 drowley@postgresql.o 3146 :GNC 1315 : startup_tuples = get_windowclause_startup_tuples(root, winclause,
3147 : : input_tuples);
3148 : :
3149 [ + + ]: 1315 : if (startup_tuples > 1.0)
3150 : 1179 : path->startup_cost += (total_cost - startup_cost) / input_tuples *
3151 : 1179 : (startup_tuples - 1.0);
5586 tgl@sss.pgh.pa.us 3152 :CBC 1315 : }
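
The startup adjustment at the end of cost_windowagg is a simple interpolation. A minimal sketch (illustrative only; the cost figures are assumed, and startup_tuples reuses the example estimate above):

/* proportion WindowAgg run cost into startup cost */
#include <stdio.h>

int
main(void)
{
	double	startup_cost = 100.0;	/* cost before first subnode row (assumed) */
	double	total_cost = 1100.0;	/* full WindowAgg cost (assumed) */
	double	input_tuples = 10000.0;	/* subnode rows (assumed) */
	double	startup_tuples = 11.0;	/* estimated rows read before first output */

	/* charge a per-tuple share of run cost for each startup tuple past one */
	if (startup_tuples > 1.0)
		startup_cost += (total_cost - startup_cost) / input_tuples *
			(startup_tuples - 1.0);

	printf("adjusted startup_cost = %.2f\n", startup_cost);
	return 0;	/* prints adjusted startup_cost = 101.00 */
}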
3153 : :
3154 : : /*
3155 : : * cost_group
3156 : : * Determines and returns the cost of performing a Group plan node,
3157 : : * including the cost of its input.
3158 : : *
3159 : : * Note: caller must ensure that input costs are for appropriately-sorted
3160 : : * input.
3161 : : */
3162 : : void
6888 3163 : 782 : cost_group(Path *path, PlannerInfo *root,
3164 : : int numGroupCols, double numGroups,
3165 : : List *quals,
3166 : : Cost input_startup_cost, Cost input_total_cost,
3167 : : double input_tuples)
3168 : : {
3169 : : double output_tuples;
3170 : : Cost startup_cost;
3171 : : Cost total_cost;
3172 : :
2355 3173 : 782 : output_tuples = numGroups;
7815 3174 : 782 : startup_cost = input_startup_cost;
3175 : 782 : total_cost = input_total_cost;
3176 : :
3177 : : /*
3178 : : * Charge one cpu_operator_cost per comparison per input tuple. We assume
3179 : : * all grouping columns get compared for most of the tuples.
3180 : : */
3181 : 782 : total_cost += cpu_operator_cost * input_tuples * numGroupCols;
3182 : :
3183 : : /*
3184 : : * If there are quals (HAVING quals), account for their cost and
3185 : : * selectivity.
3186 : : */
2355 3187 [ - + ]: 782 : if (quals)
3188 : : {
3189 : : QualCost qual_cost;
3190 : :
2355 tgl@sss.pgh.pa.us 3191 :UBC 0 : cost_qual_eval(&qual_cost, quals, root);
3192 : 0 : startup_cost += qual_cost.startup;
3193 : 0 : total_cost += qual_cost.startup + output_tuples * qual_cost.per_tuple;
3194 : :
3195 : 0 : output_tuples = clamp_row_est(output_tuples *
3196 : 0 : clauselist_selectivity(root,
3197 : : quals,
3198 : : 0,
3199 : : JOIN_INNER,
3200 : : NULL));
3201 : : }
3202 : :
2355 tgl@sss.pgh.pa.us 3203 :CBC 782 : path->rows = output_tuples;
7815 3204 : 782 : path->startup_cost = startup_cost;
3205 : 782 : path->total_cost = total_cost;
3206 : 782 : }
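
The Group node's only extra charge is the comparison term. A minimal sketch (illustrative only; input_tuples and numGroupCols are assumed, cpu_operator_cost uses the stock default):

/* Group node comparison charge */
#include <stdio.h>

int
main(void)
{
	double	cpu_operator_cost = 0.0025;	/* PostgreSQL's default setting */
	double	input_tuples = 100000.0;	/* assumed */
	int		numGroupCols = 2;			/* assumed */
	double	comparison_cost;

	/* one comparison per grouping column per input tuple */
	comparison_cost = cpu_operator_cost * input_tuples * numGroupCols;

	printf("grouping comparison cost = %.1f\n", comparison_cost);
	return 0;	/* prints grouping comparison cost = 500.0 */
}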
3207 : :
3208 : : /*
3209 : : * initial_cost_nestloop
3210 : : * Preliminary estimate of the cost of a nestloop join path.
3211 : : *
3212 : : * This must quickly produce lower-bound estimates of the path's startup and
3213 : : * total costs. If we are unable to eliminate the proposed path from
3214 : : * consideration using the lower bounds, final_cost_nestloop will be called
3215 : : * to obtain the final estimates.
3216 : : *
3217 : : * The exact division of labor between this function and final_cost_nestloop
3218 : : * is private to them, and represents a tradeoff between speed of the initial
3219 : : * estimate and getting a tight lower bound. We choose to not examine the
3220 : : * join quals here, since that's by far the most expensive part of the
3221 : : * calculations. The end result is that CPU-cost considerations must be
3222 : : * left for the second phase; and for SEMI/ANTI joins, we must also postpone
3223 : : * incorporation of the inner path's run cost.
3224 : : *
3225 : : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3226 : : * other data to be used by final_cost_nestloop
3227 : : * 'jointype' is the type of join to be performed
3228 : : * 'outer_path' is the outer input to the join
3229 : : * 'inner_path' is the inner input to the join
3230 : : * 'extra' contains miscellaneous information about the join
3231 : : */
3232 : : void
4461 3233 : 1129791 : initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
3234 : : JoinType jointype,
3235 : : Path *outer_path, Path *inner_path,
3236 : : JoinPathExtraData *extra)
3237 : : {
8825 3238 : 1129791 : Cost startup_cost = 0;
3239 : 1129791 : Cost run_cost = 0;
4461 3240 : 1129791 : double outer_path_rows = outer_path->rows;
3241 : : Cost inner_rescan_start_cost;
3242 : : Cost inner_rescan_total_cost;
3243 : : Cost inner_run_cost;
3244 : : Cost inner_rescan_run_cost;
3245 : :
3246 : : /* estimate costs to rescan the inner relation */
5328 3247 : 1129791 : cost_rescan(root, inner_path,
3248 : : &inner_rescan_start_cost,
3249 : : &inner_rescan_total_cost);
3250 : :
3251 : : /* cost of source data */
3252 : :
3253 : : /*
3254 : : * NOTE: clearly, we must pay both outer and inner paths' startup_cost
3255 : : * before we can start returning tuples, so the join's startup cost is
3256 : : * their sum. We'll also pay the inner path's rescan startup cost
3257 : : * multiple times.
3258 : : */
8825 3259 : 1129791 : startup_cost += outer_path->startup_cost + inner_path->startup_cost;
3260 : 1129791 : run_cost += outer_path->total_cost - outer_path->startup_cost;
5328 3261 [ + + ]: 1129791 : if (outer_path_rows > 1)
3262 : 753651 : run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;
3263 : :
5454 3264 : 1129791 : inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
5328 3265 : 1129791 : inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;
3266 : :
2564 3267 [ + + + + ]: 1129791 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI ||
3268 [ + + ]: 1109808 : extra->inner_unique)
3269 : : {
3270 : : /*
3271 : : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3272 : : * executor will stop after the first match.
3273 : : *
3274 : : * Getting decent estimates requires inspection of the join quals,
3275 : : * which we choose to postpone to final_cost_nestloop.
3276 : : */
3277 : :
3278 : : /* Save private data for final_cost_nestloop */
3238 3279 : 520318 : workspace->inner_run_cost = inner_run_cost;
3280 : 520318 : workspace->inner_rescan_run_cost = inner_rescan_run_cost;
3281 : : }
3282 : : else
3283 : : {
3284 : : /* Normal case; we'll scan whole input rel for each outer row */
4461 3285 : 609473 : run_cost += inner_run_cost;
3286 [ + + ]: 609473 : if (outer_path_rows > 1)
3287 : 414708 : run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
3288 : : }
3289 : :
3290 : : /* CPU costs left for later */
3291 : :
3292 : : /* Public result fields */
3293 : 1129791 : workspace->startup_cost = startup_cost;
3294 : 1129791 : workspace->total_cost = startup_cost + run_cost;
3295 : : /* Save private data for final_cost_nestloop */
3296 : 1129791 : workspace->run_cost = run_cost;
3297 : 1129791 : }
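
The rescan bookkeeping above can be restated compactly. A minimal sketch of the normal (non-SEMI/ANTI) case (illustrative only, not part of costsize.c; all path costs are assumed, with rescans cheaper than the first scan as cost_rescan might report for, say, a materialized inner side):

/* nestloop source-data cost with cheaper rescans */
#include <stdio.h>

int
main(void)
{
	double	outer_rows = 100.0;							/* assumed */
	double	outer_startup = 0.0, outer_total = 50.0;	/* assumed */
	double	inner_startup = 5.0, inner_total = 25.0;	/* assumed */
	double	rescan_start = 1.0, rescan_total = 10.0;	/* assumed */
	double	startup_cost, run_cost;

	startup_cost = outer_startup + inner_startup;
	run_cost = outer_total - outer_startup;
	if (outer_rows > 1)
		run_cost += (outer_rows - 1) * rescan_start;

	/* normal case: scan the whole inner input once per outer row */
	run_cost += inner_total - inner_startup;
	if (outer_rows > 1)
		run_cost += (outer_rows - 1) * (rescan_total - rescan_start);

	printf("startup = %.1f, total = %.1f\n",
		   startup_cost, startup_cost + run_cost);
	return 0;	/* prints startup = 5.0, total = 1065.0 */
}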
3298 : :
3299 : : /*
3300 : : * final_cost_nestloop
3301 : : * Final estimate of the cost and result size of a nestloop join path.
3302 : : *
3303 : : * 'path' is already filled in except for the rows and cost fields
3304 : : * 'workspace' is the result from initial_cost_nestloop
3305 : : * 'extra' contains miscellaneous information about the join
3306 : : */
3307 : : void
3308 : 568941 : final_cost_nestloop(PlannerInfo *root, NestPath *path,
3309 : : JoinCostWorkspace *workspace,
3310 : : JoinPathExtraData *extra)
3311 : : {
980 peter@eisentraut.org 3312 : 568941 : Path *outer_path = path->jpath.outerjoinpath;
3313 : 568941 : Path *inner_path = path->jpath.innerjoinpath;
4461 tgl@sss.pgh.pa.us 3314 : 568941 : double outer_path_rows = outer_path->rows;
3315 : 568941 : double inner_path_rows = inner_path->rows;
3316 : 568941 : Cost startup_cost = workspace->startup_cost;
3317 : 568941 : Cost run_cost = workspace->run_cost;
3318 : : Cost cpu_per_tuple;
3319 : : QualCost restrict_qual_cost;
3320 : : double ntuples;
3321 : :
3322 : : /* Protect some assumptions below that rowcounts aren't zero */
1273 drowley@postgresql.o 3323 [ - + ]: 568941 : if (outer_path_rows <= 0)
2941 tgl@sss.pgh.pa.us 3324 :UBC 0 : outer_path_rows = 1;
1273 drowley@postgresql.o 3325 [ + + ]:CBC 568941 : if (inner_path_rows <= 0)
2941 tgl@sss.pgh.pa.us 3326 : 312 : inner_path_rows = 1;
3327 : : /* Mark the path with the correct row estimate */
980 peter@eisentraut.org 3328 [ + + ]: 568941 : if (path->jpath.path.param_info)
3329 : 13225 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3330 : : else
3331 : 555716 : path->jpath.path.rows = path->jpath.path.parent->rows;
3332 : :
3333 : : /* For partial paths, scale row estimate. */
3334 [ + + ]: 568941 : if (path->jpath.path.parallel_workers > 0)
3335 : : {
3336 : 3916 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3337 : :
3338 : 3916 : path->jpath.path.rows =
3339 : 3916 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3340 : : }
3341 : :
3342 : : /*
3343 : : * We could include disable_cost in the preliminary estimate, but that
3344 : : * would amount to optimizing for the case where the join method is
3345 : : * disabled, which doesn't seem like the way to bet.
3346 : : */
4461 tgl@sss.pgh.pa.us 3347 [ + + ]: 568941 : if (!enable_nestloop)
3348 : 1626 : startup_cost += disable_cost;
3349 : :
3350 : : /* cost of inner-relation source data (we already dealt with outer rel) */
3351 : :
980 peter@eisentraut.org 3352 [ + + + + ]: 568941 : if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI ||
2564 tgl@sss.pgh.pa.us 3353 [ + + ]: 555653 : extra->inner_unique)
4461 3354 : 360685 : {
3355 : : /*
3356 : : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3357 : : * executor will stop after the first match.
3358 : : */
3238 3359 : 360685 : Cost inner_run_cost = workspace->inner_run_cost;
3360 : 360685 : Cost inner_rescan_run_cost = workspace->inner_rescan_run_cost;
3361 : : double outer_matched_rows;
3362 : : double outer_unmatched_rows;
3363 : : Selectivity inner_scan_frac;
3364 : :
3365 : : /*
3366 : : * For an outer-rel row that has at least one match, we can expect the
3367 : : * inner scan to stop after a fraction 1/(match_count+1) of the inner
3368 : : * rows, if the matches are evenly distributed. Since they probably
3369 : : * aren't quite evenly distributed, we apply a fuzz factor of 2.0 to
3370 : : * that fraction. (If we used a larger fuzz factor, we'd have to
3371 : : * clamp inner_scan_frac to at most 1.0; but since match_count is at
3372 : : * least 1, no such clamp is needed now.)
3373 : : */
2564 3374 : 360685 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
2507 3375 : 360685 : outer_unmatched_rows = outer_path_rows - outer_matched_rows;
2564 3376 : 360685 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
3377 : :
3378 : : /*
3379 : : * Compute number of tuples processed (not number emitted!). First,
3380 : : * account for successfully-matched outer rows.
3381 : : */
5454 3382 : 360685 : ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;
3383 : :
3384 : : /*
3385 : : * Now we need to estimate the actual costs of scanning the inner
3386 : : * relation, which may be quite a bit less than N times inner_run_cost
3387 : : * due to early scan stops. We consider two cases. If the inner path
3388 : : * is an indexscan using all the joinquals as indexquals, then an
3389 : : * unmatched outer row results in an indexscan returning no rows,
3390 : : * which is probably quite cheap. Otherwise, the executor will have
3391 : : * to scan the whole inner rel for an unmatched row; not so cheap.
3392 : : */
4378 3393 [ + + ]: 360685 : if (has_indexed_join_quals(path))
3394 : : {
3395 : : /*
3396 : : * Successfully-matched outer rows will only require scanning
3397 : : * inner_scan_frac of the inner relation. In this case, we don't
3398 : : * need to charge the full inner_run_cost even when that's more
3399 : : * than inner_rescan_run_cost, because we can assume that none of
3400 : : * the inner scans ever scan the whole inner relation. So it's
3401 : : * okay to assume that all the inner scan executions can be
3402 : : * fractions of the full cost, even if materialization is reducing
3403 : : * the rescan cost. At this writing, it's impossible to get here
3404 : : * for a materialized inner scan, so inner_run_cost and
3405 : : * inner_rescan_run_cost will be the same anyway; but just in
3406 : : * case, use inner_run_cost for the first matched tuple and
3407 : : * inner_rescan_run_cost for additional ones.
3408 : : */
3238 3409 : 64746 : run_cost += inner_run_cost * inner_scan_frac;
3410 [ + + ]: 64746 : if (outer_matched_rows > 1)
3411 : 7145 : run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
3412 : :
3413 : : /*
3414 : : * Add the cost of inner-scan executions for unmatched outer rows.
3415 : : * We estimate this as the same cost as returning the first tuple
3416 : : * of a nonempty scan. We consider that these are all rescans,
3417 : : * since we used inner_run_cost once already.
3418 : : */
2507 3419 : 64746 : run_cost += outer_unmatched_rows *
5328 3420 : 64746 : inner_rescan_run_cost / inner_path_rows;
3421 : :
3422 : : /*
3423 : : * We won't be evaluating any quals at all for unmatched rows, so
3424 : : * don't add them to ntuples.
3425 : : */
3426 : : }
3427 : : else
3428 : : {
3429 : : /*
3430 : : * Here, a complicating factor is that rescans may be cheaper than
3431 : : * first scans. If we never scan all the way to the end of the
3432 : : * inner rel, it might be (depending on the plan type) that we'd
3433 : : * never pay the whole inner first-scan run cost. However it is
3434 : : * difficult to estimate whether that will happen (and it could
3435 : : * not happen if there are any unmatched outer rows!), so be
3436 : : * conservative and always charge the whole first-scan cost once.
3437 : : * We consider this charge to correspond to the first unmatched
3438 : : * outer row, unless there isn't one in our estimate, in which
3439 : : * case blame it on the first matched row.
3440 : : */
3441 : :
3442 : : /* First, count all unmatched join tuples as being processed */
2507 3443 : 295939 : ntuples += outer_unmatched_rows * inner_path_rows;
3444 : :
3445 : : /* Now add the forced full scan, and decrement appropriate count */
3238 3446 : 295939 : run_cost += inner_run_cost;
2507 3447 [ + + ]: 295939 : if (outer_unmatched_rows >= 1)
3448 : 288729 : outer_unmatched_rows -= 1;
3449 : : else
3450 : 7210 : outer_matched_rows -= 1;
3451 : :
3452 : : /* Add inner run cost for additional outer tuples having matches */
3453 [ + + ]: 295939 : if (outer_matched_rows > 0)
3454 : 92466 : run_cost += outer_matched_rows * inner_rescan_run_cost * inner_scan_frac;
3455 : :
3456 : : /* Add inner run cost for additional unmatched outer tuples */
3457 [ + + ]: 295939 : if (outer_unmatched_rows > 0)
3458 : 184183 : run_cost += outer_unmatched_rows * inner_rescan_run_cost;
3459 : : }
3460 : : }
3461 : : else
3462 : : {
3463 : : /* Normal-case source costs were included in preliminary estimate */
3464 : :
3465 : : /* Compute number of tuples processed (not number emitted!) */
5454 3466 : 208256 : ntuples = outer_path_rows * inner_path_rows;
3467 : : }
3468 : :
3469 : : /* CPU costs */
980 peter@eisentraut.org 3470 : 568941 : cost_qual_eval(&restrict_qual_cost, path->jpath.joinrestrictinfo, root);
7763 tgl@sss.pgh.pa.us 3471 : 568941 : startup_cost += restrict_qual_cost.startup;
3472 : 568941 : cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
8825 3473 : 568941 : run_cost += cpu_per_tuple * ntuples;
3474 : :
3475 : : /* tlist eval costs are paid per output row, not per tuple scanned */
980 peter@eisentraut.org 3476 : 568941 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3477 : 568941 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3478 : :
3479 : 568941 : path->jpath.path.startup_cost = startup_cost;
3480 : 568941 : path->jpath.path.total_cost = startup_cost + run_cost;
10141 scrappy@hub.org 3481 : 568941 : }
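
The early-stop arithmetic for SEMI/ANTI (and unique-inner) joins hinges on inner_scan_frac. A minimal sketch (illustrative only; the selectivity estimates that semifactors would supply are assumed):

/* expected tuples processed for the matched outer rows of a SEMI join */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double	outer_rows = 1000.0;		/* assumed */
	double	inner_rows = 500.0;			/* assumed */
	double	outer_match_frac = 0.4;		/* fraction of outer rows with a match */
	double	match_count = 3.0;			/* avg inner matches per matched outer row */
	double	outer_matched_rows = rint(outer_rows * outer_match_frac);
	double	inner_scan_frac = 2.0 / (match_count + 1.0);
	double	ntuples;

	/* matched outer rows stop early, after inner_scan_frac of the inner rel */
	ntuples = outer_matched_rows * inner_rows * inner_scan_frac;

	printf("matched = %.0f, scan frac = %.2f, ntuples = %.0f\n",
		   outer_matched_rows, inner_scan_frac, ntuples);
	return 0;	/* prints matched = 400, scan frac = 0.50, ntuples = 100000 */
}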
3482 : :
3483 : : /*
3484 : : * initial_cost_mergejoin
3485 : : * Preliminary estimate of the cost of a mergejoin path.
3486 : : *
3487 : : * This must quickly produce lower-bound estimates of the path's startup and
3488 : : * total costs. If we are unable to eliminate the proposed path from
3489 : : * consideration using the lower bounds, final_cost_mergejoin will be called
3490 : : * to obtain the final estimates.
3491 : : *
3492 : : * The exact division of labor between this function and final_cost_mergejoin
3493 : : * is private to them, and represents a tradeoff between speed of the initial
3494 : : * estimate and getting a tight lower bound. We choose to not examine the
3495 : : * join quals here, except for obtaining the scan selectivity estimate which
3496 : : * is really essential (but fortunately, use of caching keeps the cost of
3497 : : * getting that down to something reasonable).
3498 : : * We also assume that cost_sort is cheap enough to use here.
3499 : : *
3500 : : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
3501 : : * other data to be used by final_cost_mergejoin
3502 : : * 'jointype' is the type of join to be performed
3503 : : * 'mergeclauses' is the list of joinclauses to be used as merge clauses
3504 : : * 'outer_path' is the outer input to the join
3505 : : * 'inner_path' is the inner input to the join
3506 : : * 'outersortkeys' is the list of sort keys for the outer path
3507 : : * 'innersortkeys' is the list of sort keys for the inner path
3508 : : * 'extra' contains miscellaneous information about the join
3509 : : *
3510 : : * Note: outersortkeys and innersortkeys should be NIL if no explicit
3511 : : * sort is needed because the respective source path is already ordered.
3512 : : */
3513 : : void
4461 tgl@sss.pgh.pa.us 3514 : 506832 : initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
3515 : : JoinType jointype,
3516 : : List *mergeclauses,
3517 : : Path *outer_path, Path *inner_path,
3518 : : List *outersortkeys, List *innersortkeys,
3519 : : JoinPathExtraData *extra)
3520 : : {
8825 3521 : 506832 : Cost startup_cost = 0;
3522 : 506832 : Cost run_cost = 0;
4461 3523 : 506832 : double outer_path_rows = outer_path->rows;
3524 : 506832 : double inner_path_rows = inner_path->rows;
3525 : : Cost inner_run_cost;
3526 : : double outer_rows,
3527 : : inner_rows,
3528 : : outer_skip_rows,
3529 : : inner_skip_rows;
3530 : : Selectivity outerstartsel,
3531 : : outerendsel,
3532 : : innerstartsel,
3533 : : innerendsel;
3534 : : Path sort_path; /* dummy for result of cost_sort */
3535 : :
3536 : : /* Protect some assumptions below that rowcounts aren't zero */
1273 drowley@postgresql.o 3537 [ + + ]: 506832 : if (outer_path_rows <= 0)
5865 tgl@sss.pgh.pa.us 3538 : 48 : outer_path_rows = 1;
1273 drowley@postgresql.o 3539 [ + + ]: 506832 : if (inner_path_rows <= 0)
5865 tgl@sss.pgh.pa.us 3540 : 63 : inner_path_rows = 1;
3541 : :
3542 : : /*
3543 : : * A merge join will stop as soon as it exhausts either input stream
3544 : : * (unless it's an outer join, in which case the outer side has to be
3545 : : * scanned all the way anyway). Estimate fraction of the left and right
3546 : : * inputs that will actually need to be scanned. Likewise, we can
3547 : : * estimate the number of rows that will be skipped before the first join
3548 : : * pair is found, which should be factored into startup cost. We use only
3549 : : * the first (most significant) merge clause for this purpose. Since
3550 : : * mergejoinscansel() is a fairly expensive computation, we cache the
3551 : : * results in the merge clause RestrictInfo.
3552 : : */
4461 3553 [ + + + + ]: 506832 : if (mergeclauses && jointype != JOIN_FULL)
8080 3554 : 503780 : {
6294 3555 : 503780 : RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
3556 : : List *opathkeys;
3557 : : List *ipathkeys;
3558 : : PathKey *opathkey;
3559 : : PathKey *ipathkey;
3560 : : MergeScanSelCache *cache;
3561 : :
3562 : : /* Get the input pathkeys to determine the sort-order details */
3563 [ + + ]: 503780 : opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
3564 [ + + ]: 503780 : ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
3565 [ - + ]: 503780 : Assert(opathkeys);
3566 [ - + ]: 503780 : Assert(ipathkeys);
3567 : 503780 : opathkey = (PathKey *) linitial(opathkeys);
3568 : 503780 : ipathkey = (PathKey *) linitial(ipathkeys);
3569 : : /* debugging check */
3570 [ + - ]: 503780 : if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
4775 3571 [ + - ]: 503780 : opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
6294 3572 [ + - ]: 503780 : opathkey->pk_strategy != ipathkey->pk_strategy ||
3573 [ - + ]: 503780 : opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
6294 tgl@sss.pgh.pa.us 3574 [ # # ]:UBC 0 : elog(ERROR, "left and right pathkeys do not match in mergejoin");
3575 : :
3576 : : /* Get the selectivity with caching */
6292 tgl@sss.pgh.pa.us 3577 :CBC 503780 : cache = cached_scansel(root, firstclause, opathkey);
3578 : :
6294 3579 [ + + ]: 503780 : if (bms_is_subset(firstclause->left_relids,
3580 : 503780 : outer_path->parent->relids))
3581 : : {
3582 : : /* left side of clause is outer */
5972 3583 : 263518 : outerstartsel = cache->leftstartsel;
3584 : 263518 : outerendsel = cache->leftendsel;
3585 : 263518 : innerstartsel = cache->rightstartsel;
3586 : 263518 : innerendsel = cache->rightendsel;
3587 : : }
3588 : : else
3589 : : {
3590 : : /* left side of clause is inner */
3591 : 240262 : outerstartsel = cache->rightstartsel;
3592 : 240262 : outerendsel = cache->rightendsel;
3593 : 240262 : innerstartsel = cache->leftstartsel;
3594 : 240262 : innerendsel = cache->leftendsel;
3595 : : }
4461 3596 [ + + + + ]: 503780 : if (jointype == JOIN_LEFT ||
3597 : : jointype == JOIN_ANTI)
3598 : : {
5972 3599 : 100900 : outerstartsel = 0.0;
3600 : 100900 : outerendsel = 1.0;
3601 : : }
375 3602 [ + + + + ]: 402880 : else if (jointype == JOIN_RIGHT ||
3603 : : jointype == JOIN_RIGHT_ANTI)
3604 : : {
5972 3605 : 98507 : innerstartsel = 0.0;
3606 : 98507 : innerendsel = 1.0;
3607 : : }
3608 : : }
3609 : : else
3610 : : {
3611 : : /* cope with clauseless or full mergejoin */
3612 : 3052 : outerstartsel = innerstartsel = 0.0;
3613 : 3052 : outerendsel = innerendsel = 1.0;
3614 : : }
3615 : :
3616 : : /*
3617 : : * Convert selectivities to row counts. We force outer_rows and
3618 : : * inner_rows to be at least 1, but the skip_rows estimates can be zero.
3619 : : */
3620 : 506832 : outer_skip_rows = rint(outer_path_rows * outerstartsel);
3621 : 506832 : inner_skip_rows = rint(inner_path_rows * innerstartsel);
3622 : 506832 : outer_rows = clamp_row_est(outer_path_rows * outerendsel);
3623 : 506832 : inner_rows = clamp_row_est(inner_path_rows * innerendsel);
3624 : :
3625 [ - + ]: 506832 : Assert(outer_skip_rows <= outer_rows);
3626 [ - + ]: 506832 : Assert(inner_skip_rows <= inner_rows);
3627 : :
3628 : : /*
3629 : : * Readjust scan selectivities to account for above rounding. This is
3630 : : * normally an insignificant effect, but when there are only a few rows in
3631 : : * the inputs, failing to do this makes for a large percentage error.
3632 : : */
3633 : 506832 : outerstartsel = outer_skip_rows / outer_path_rows;
3634 : 506832 : innerstartsel = inner_skip_rows / inner_path_rows;
3635 : 506832 : outerendsel = outer_rows / outer_path_rows;
3636 : 506832 : innerendsel = inner_rows / inner_path_rows;
3637 : :
4489 3638 [ - + ]: 506832 : Assert(outerstartsel <= outerendsel);
3639 [ - + ]: 506832 : Assert(innerstartsel <= innerendsel);
3640 : :
3641 : : /* cost of source data */
3642 : :
8825 3643 [ + + ]: 506832 : if (outersortkeys) /* do we need to sort outer? */
3644 : : {
3645 : 241660 : cost_sort(&sort_path,
3646 : : root,
3647 : : outersortkeys,
3648 : : outer_path->total_cost,
3649 : : outer_path_rows,
2978 3650 : 241660 : outer_path->pathtarget->width,
3651 : : 0.0,
3652 : : work_mem,
3653 : : -1.0);
8825 3654 : 241660 : startup_cost += sort_path.startup_cost;
5972 3655 : 241660 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
3656 : 241660 : * outerstartsel;
8080 3657 : 241660 : run_cost += (sort_path.total_cost - sort_path.startup_cost)
5972 3658 : 241660 : * (outerendsel - outerstartsel);
3659 : : }
3660 : : else
3661 : : {
8825 3662 : 265172 : startup_cost += outer_path->startup_cost;
5972 3663 : 265172 : startup_cost += (outer_path->total_cost - outer_path->startup_cost)
3664 : 265172 : * outerstartsel;
8080 3665 : 265172 : run_cost += (outer_path->total_cost - outer_path->startup_cost)
5972 3666 : 265172 : * (outerendsel - outerstartsel);
3667 : : }
3668 : :
8825 3669 [ + + ]: 506832 : if (innersortkeys) /* do we need to sort inner? */
3670 : : {
3671 : 398073 : cost_sort(&sort_path,
3672 : : root,
3673 : : innersortkeys,
3674 : : inner_path->total_cost,
3675 : : inner_path_rows,
2978 3676 : 398073 : inner_path->pathtarget->width,
3677 : : 0.0,
3678 : : work_mem,
3679 : : -1.0);
8825 3680 : 398073 : startup_cost += sort_path.startup_cost;
5972 3681 : 398073 : startup_cost += (sort_path.total_cost - sort_path.startup_cost)
5264 3682 : 398073 : * innerstartsel;
3683 : 398073 : inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
3684 : 398073 : * (innerendsel - innerstartsel);
3685 : : }
3686 : : else
3687 : : {
8825 3688 : 108759 : startup_cost += inner_path->startup_cost;
5972 3689 : 108759 : startup_cost += (inner_path->total_cost - inner_path->startup_cost)
5264 3690 : 108759 : * innerstartsel;
3691 : 108759 : inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
3692 : 108759 : * (innerendsel - innerstartsel);
3693 : : }
3694 : :
3695 : : /*
3696 : : * We can't yet determine whether rescanning occurs, or whether
3697 : : * materialization of the inner input should be done. The minimum
3698 : : * possible inner input cost, regardless of rescan and materialization
3699 : : * considerations, is inner_run_cost. We include that in
3700 : : * workspace->total_cost, but not yet in run_cost.
3701 : : */
3702 : :
3703 : : /* CPU costs left for later */
3704 : :
3705 : : /* Public result fields */
4461 3706 : 506832 : workspace->startup_cost = startup_cost;
3707 : 506832 : workspace->total_cost = startup_cost + run_cost + inner_run_cost;
3708 : : /* Save private data for final_cost_mergejoin */
3709 : 506832 : workspace->run_cost = run_cost;
3710 : 506832 : workspace->inner_run_cost = inner_run_cost;
3711 : 506832 : workspace->outer_rows = outer_rows;
3712 : 506832 : workspace->inner_rows = inner_rows;
3713 : 506832 : workspace->outer_skip_rows = outer_skip_rows;
3714 : 506832 : workspace->inner_skip_rows = inner_skip_rows;
3715 : 506832 : }
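
The rounding readjustment matters most for tiny inputs. A minimal sketch (illustrative only, not part of costsize.c; clamp_rows is a simplified stand-in for clamp_row_est, and the row count and selectivities are assumed):

/* convert merge scan selectivities to row counts, then readjust */
#include <math.h>
#include <stdio.h>

static double
clamp_rows(double nrows)	/* simplified stand-in for clamp_row_est() */
{
	return (nrows <= 1.0) ? 1.0 : rint(nrows);
}

int
main(void)
{
	double	outer_path_rows = 7.0;	/* deliberately small input (assumed) */
	double	outerstartsel = 0.10;	/* assumed */
	double	outerendsel = 0.90;		/* assumed */
	double	outer_skip_rows = rint(outer_path_rows * outerstartsel);
	double	outer_rows = clamp_rows(outer_path_rows * outerendsel);

	/* readjust so later interpolation matches the rounded row counts */
	outerstartsel = outer_skip_rows / outer_path_rows;
	outerendsel = outer_rows / outer_path_rows;

	printf("skip = %.0f, rows = %.0f, startsel = %.3f, endsel = %.3f\n",
		   outer_skip_rows, outer_rows, outerstartsel, outerendsel);
	return 0;	/* 0.10/0.90 become 0.143/0.857 on a 7-row input */
}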
3716 : :
3717 : : /*
3718 : : * final_cost_mergejoin
3719 : : * Final estimate of the cost and result size of a mergejoin path.
3720 : : *
3721 : : * Unlike other costsize functions, this routine makes two actual decisions:
3722 : : * whether the executor will need to do mark/restore, and whether we should
3723 : : * materialize the inner path. It would be logically cleaner to build
3724 : : * separate paths testing these alternatives, but that would require repeating
3725 : : * most of the cost calculations, which are not all that cheap. Since the
3726 : : * choice will not affect output pathkeys or startup cost, only total cost,
3727 : : * there is no possibility of wanting to keep more than one path. So it seems
3728 : : * best to make the decisions here and record them in the path's
3729 : : * skip_mark_restore and materialize_inner fields.
3730 : : *
3731 : : * Mark/restore overhead is usually required, but can be skipped if we know
3732 : : * that the executor need find only one match per outer tuple, and that the
3733 : : * mergeclauses are sufficient to identify a match.
3734 : : *
3735 : : * We materialize the inner path if we need mark/restore and either the inner
3736 : : * path can't support mark/restore, or it's cheaper to use an interposed
3737 : : * Material node to handle mark/restore.
3738 : : *
3739 : : * 'path' is already filled in except for the rows and cost fields and
3740 : : * skip_mark_restore and materialize_inner
3741 : : * 'workspace' is the result from initial_cost_mergejoin
3742 : : * 'extra' contains miscellaneous information about the join
3743 : : */
3744 : : void
3745 : 128228 : final_cost_mergejoin(PlannerInfo *root, MergePath *path,
3746 : : JoinCostWorkspace *workspace,
3747 : : JoinPathExtraData *extra)
3748 : : {
3749 : 128228 : Path *outer_path = path->jpath.outerjoinpath;
3750 : 128228 : Path *inner_path = path->jpath.innerjoinpath;
3751 : 128228 : double inner_path_rows = inner_path->rows;
3752 : 128228 : List *mergeclauses = path->path_mergeclauses;
3753 : 128228 : List *innersortkeys = path->innersortkeys;
3754 : 128228 : Cost startup_cost = workspace->startup_cost;
3755 : 128228 : Cost run_cost = workspace->run_cost;
3756 : 128228 : Cost inner_run_cost = workspace->inner_run_cost;
3757 : 128228 : double outer_rows = workspace->outer_rows;
3758 : 128228 : double inner_rows = workspace->inner_rows;
3759 : 128228 : double outer_skip_rows = workspace->outer_skip_rows;
3760 : 128228 : double inner_skip_rows = workspace->inner_skip_rows;
3761 : : Cost cpu_per_tuple,
3762 : : bare_inner_cost,
3763 : : mat_inner_cost;
3764 : : QualCost merge_qual_cost;
3765 : : QualCost qp_qual_cost;
3766 : : double mergejointuples,
3767 : : rescannedtuples;
3768 : : double rescanratio;
3769 : :
3770 : : /* Protect some assumptions below that rowcounts aren't zero */
1273 drowley@postgresql.o 3771 [ + + ]: 128228 : if (inner_path_rows <= 0)
4461 tgl@sss.pgh.pa.us 3772 : 45 : inner_path_rows = 1;
3773 : :
3774 : : /* Mark the path with the correct row estimate */
4378 3775 [ + + ]: 128228 : if (path->jpath.path.param_info)
3776 : 327 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
3777 : : else
3778 : 127901 : path->jpath.path.rows = path->jpath.path.parent->rows;
3779 : :
3780 : : /* For partial paths, scale row estimate. */
2648 rhaas@postgresql.org 3781 [ + + ]: 128228 : if (path->jpath.path.parallel_workers > 0)
3782 : : {
2524 bruce@momjian.us 3783 : 4553 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
3784 : :
2587 rhaas@postgresql.org 3785 : 4553 : path->jpath.path.rows =
3786 : 4553 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
3787 : : }
3788 : :
3789 : : /*
3790 : : * We could include disable_cost in the preliminary estimate, but that
3791 : : * would amount to optimizing for the case where the join method is
3792 : : * disabled, which doesn't seem like the way to bet.
3793 : : */
4461 tgl@sss.pgh.pa.us 3794 [ - + ]: 128228 : if (!enable_mergejoin)
4461 tgl@sss.pgh.pa.us 3795 :UBC 0 : startup_cost += disable_cost;
3796 : :
3797 : : /*
3798 : : * Compute cost of the mergequals and qpquals (other restriction clauses)
3799 : : * separately.
3800 : : */
4461 tgl@sss.pgh.pa.us 3801 :CBC 128228 : cost_qual_eval(&merge_qual_cost, mergeclauses, root);
3802 : 128228 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
3803 : 128228 : qp_qual_cost.startup -= merge_qual_cost.startup;
3804 : 128228 : qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;
3805 : :
3806 : : /*
3807 : : * With a SEMI or ANTI join, or if the innerrel is known unique, the
3808 : : * executor will stop scanning for matches after the first match. When
3809 : : * all the joinclauses are merge clauses, this means we don't ever need to
3810 : : * back up the merge, and so we can skip mark/restore overhead.
3811 : : */
2564 3812 [ + + ]: 128228 : if ((path->jpath.jointype == JOIN_SEMI ||
3813 [ + + ]: 126353 : path->jpath.jointype == JOIN_ANTI ||
3814 [ + + + + ]: 183519 : extra->inner_unique) &&
3815 : 60250 : (list_length(path->jpath.joinrestrictinfo) ==
3816 : 60250 : list_length(path->path_mergeclauses)))
3817 : 51274 : path->skip_mark_restore = true;
3818 : : else
3819 : 76954 : path->skip_mark_restore = false;
3820 : :
3821 : : /*
3822 : : * Get approx # tuples passing the mergequals. We use approx_tuple_count
3823 : : * here because we need an estimate done with JOIN_INNER semantics.
3824 : : */
4461 3825 : 128228 : mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);
3826 : :
3827 : : /*
3828 : : * When there are equal merge keys in the outer relation, the mergejoin
3829 : : * must rescan any matching tuples in the inner relation. This means
3830 : : * re-fetching inner tuples; we have to estimate how often that happens.
3831 : : *
3832 : : * For regular inner and outer joins, the number of re-fetches can be
3833 : : * estimated approximately as size of merge join output minus size of
3834 : : * inner relation. Assume that the distinct key values are 1, 2, ..., and
3835 : : * denote the number of values of each key in the outer relation as m1,
3836 : : * m2, ...; in the inner relation, n1, n2, ... Then we have
3837 : : *
3838 : : * size of join = m1 * n1 + m2 * n2 + ...
3839 : : *
3840 : : * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
3841 : : * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
3842 : : * relation
3843 : : *
3844 : : * This equation works correctly for outer tuples having no inner match
3845 : : * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
3846 : : * are effectively subtracting those from the number of rescanned tuples,
3847 : : * when we should not. Can we do better without expensive selectivity
3848 : : * computations?
3849 : : *
3850 : : * The whole issue is moot if we are working from a unique-ified outer
3851 : : * input, or if we know we don't need to mark/restore at all.
3852 : : */
1429 3853 [ + + + + ]: 128228 : if (IsA(outer_path, UniquePath) || path->skip_mark_restore)
4461 3854 : 51662 : rescannedtuples = 0;
3855 : : else
3856 : : {
3857 : 76566 : rescannedtuples = mergejointuples - inner_path_rows;
3858 : : /* Must clamp because of possible underestimate */
3859 [ + + ]: 76566 : if (rescannedtuples < 0)
3860 : 29871 : rescannedtuples = 0;
3861 : : }
3862 : :
3863 : : /*
3864 : : * We'll inflate various costs this much to account for rescanning. Note
3865 : : * that this is to be multiplied by something involving inner_rows, or
3866 : : * another number related to the portion of the inner rel we'll scan.
3867 : : */
1944 3868 : 128228 : rescanratio = 1.0 + (rescannedtuples / inner_rows);
3869 : :
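	/*
	 * Worked example (editorial illustration; numbers assumed): with
	 * mergejointuples = 1500 and inner_path_rows = 1000, rescannedtuples =
	 * 1500 - 1000 = 500, and with inner_rows = 1000 this gives rescanratio =
	 * 1.0 + 500/1000 = 1.5, i.e. the inner input is effectively read one and
	 * a half times.
	 */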
3870 : : /*
3871 : : * Decide whether we want to materialize the inner input to shield it from
3872 : : * mark/restore and repeated re-fetches. Our cost model for regular
3873 : : * re-fetches is that a re-fetch costs the same as an original fetch,
3874 : : * which is probably an overestimate; but on the other hand we ignore the
3875 : : * bookkeeping costs of mark/restore. Not clear if it's worth developing
3876 : : * a more refined model. So we just need to inflate the inner run cost by
3877 : : * rescanratio.
3878 : : */
5264 3879 : 128228 : bare_inner_cost = inner_run_cost * rescanratio;
3880 : :
3881 : : /*
3882 : : * When we interpose a Material node the re-fetch cost is assumed to be
3883 : : * just cpu_operator_cost per tuple, independently of the underlying
3884 : : * plan's cost; and we charge an extra cpu_operator_cost per original
3885 : : * fetch as well. Note that we're assuming the materialize node will
3886 : : * never spill to disk, since it only has to remember tuples back to the
3887 : : * last mark. (If there are a huge number of duplicates, our other cost
3888 : : * factors will make the path so expensive that it probably won't get
3889 : : * chosen anyway.) So we don't use cost_rescan here.
3890 : : *
3891 : : * Note: keep this estimate in sync with create_mergejoin_plan's labeling
3892 : : * of the generated Material node.
3893 : : */
3894 : 128228 : mat_inner_cost = inner_run_cost +
1944 3895 : 128228 : cpu_operator_cost * inner_rows * rescanratio;
3896 : :
3897 : : /*
3898 : : * If we don't need mark/restore at all, we don't need materialization.
3899 : : */
2564 3900 [ + + ]: 128228 : if (path->skip_mark_restore)
3901 : 51274 : path->materialize_inner = false;
3902 : :
3903 : : /*
3904 : : * Prefer materializing if it looks cheaper, unless the user has asked to
3905 : : * suppress materialization.
3906 : : */
3907 [ + + + + ]: 76954 : else if (enable_material && mat_inner_cost < bare_inner_cost)
5264 3908 : 1323 : path->materialize_inner = true;
3909 : :
3910 : : /*
3911 : : * Even if materializing doesn't look cheaper, we *must* do it if the
3912 : : * inner path is to be used directly (without sorting) and it doesn't
3913 : : * support mark/restore.
3914 : : *
3915 : : * Since the inner side must be ordered, and only Sorts and IndexScans can
3916 : : * create order to begin with, and they both support mark/restore, you
3917 : : * might think there's no problem --- but you'd be wrong. Nestloop and
3918 : : * merge joins can *preserve* the order of their inputs, so they can be
3919 : : * selected as the input of a mergejoin, and they don't support
3920 : : * mark/restore at present.
3921 : : *
3922 : : * We don't test the value of enable_material here, because
3923 : : * materialization is required for correctness in this case, and turning
3924 : : * it off does not entitle us to deliver an invalid plan.
3925 : : */
3926 [ + + ]: 75631 : else if (innersortkeys == NIL &&
3446 rhaas@postgresql.org 3927 [ + + ]: 3064 : !ExecSupportsMarkRestore(inner_path))
5264 tgl@sss.pgh.pa.us 3928 : 499 : path->materialize_inner = true;
3929 : :
3930 : : /*
3931 : : * Also, force materializing if the inner path is to be sorted and the
3932 : : * sort is expected to spill to disk. This is because the final merge
3933 : : * pass can be done on-the-fly if it doesn't have to support mark/restore.
3934 : : * We don't try to adjust the cost estimates for this consideration,
3935 : : * though.
3936 : : *
3937 : : * Since materialization is a performance optimization in this case,
3938 : : * rather than necessary for correctness, we skip it if enable_material is
3939 : : * off.
3940 : : */
5109 rhaas@postgresql.org 3941 [ + + + + ]: 75132 : else if (enable_material && innersortkeys != NIL &&
2978 tgl@sss.pgh.pa.us 3942 : 72543 : relation_byte_size(inner_path_rows,
3943 : 72543 : inner_path->pathtarget->width) >
5264 3944 [ + + ]: 72543 : (work_mem * 1024L))
3945 : 86 : path->materialize_inner = true;
3946 : : else
3947 : 75046 : path->materialize_inner = false;
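 : :
 : : 	/*
 : : 	 * Rough illustration of the spill test (sizes assumed, and ignoring
 : : 	 * relation_byte_size's per-tuple header overhead): 100000 inner rows
 : : 	 * of width 100 come to some 10MB, which exceeds the default 4MB
 : : 	 * work_mem, so a sorted inner side of that size would be materialized.
 : : 	 */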
3948 : :
3949 : : /* Charge the right incremental cost for the chosen case */
3950 [ + + ]: 128228 : if (path->materialize_inner)
3951 : 1908 : run_cost += mat_inner_cost;
3952 : : else
3953 : 126320 : run_cost += bare_inner_cost;
3954 : :
3955 : : /* CPU costs */
3956 : :
3957 : : /*
3958 : : * The number of tuple comparisons needed is approximately number of outer
3959 : : * rows plus number of inner rows plus number of rescanned tuples (can we
3960 : : * refine this?). At each one, we need to evaluate the mergejoin quals.
3961 : : */
7748 3962 : 128228 : startup_cost += merge_qual_cost.startup;
5972 3963 : 128228 : startup_cost += merge_qual_cost.per_tuple *
3964 : 128228 : (outer_skip_rows + inner_skip_rows * rescanratio);
7748 3965 : 128228 : run_cost += merge_qual_cost.per_tuple *
5972 3966 : 128228 : ((outer_rows - outer_skip_rows) +
3967 : 128228 : (inner_rows - inner_skip_rows) * rescanratio);
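 : :
 : : 	/*
 : : 	 * Restated (no new logic): the two charges together come to
 : : 	 * merge_qual_cost.per_tuple * (outer_rows + inner_rows * rescanratio),
 : : 	 * i.e. the per-comparison cost times outer rows plus inner rows plus
 : : 	 * rescanned tuples; the skip_rows terms only move the comparisons
 : : 	 * made before the first join pair into startup cost.
 : : 	 */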
3968 : :
3969 : : /*
3970 : : * For each tuple that gets through the mergejoin proper, we charge
3971 : : * cpu_tuple_cost plus the cost of evaluating additional restriction
3972 : : * clauses that are to be applied at the join. (This is pessimistic since
3973 : : * not all of the quals may get evaluated at each tuple.)
3974 : : *
3975 : : * Note: we could adjust for SEMI/ANTI joins skipping some qual
3976 : : * evaluations here, but it's probably not worth the trouble.
3977 : : */
7748 3978 : 128228 : startup_cost += qp_qual_cost.startup;
3979 : 128228 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
5720 3980 : 128228 : run_cost += cpu_per_tuple * mergejointuples;
3981 : :
3982 : : /* tlist eval costs are paid per output row, not per tuple scanned */
2978 3983 : 128228 : startup_cost += path->jpath.path.pathtarget->cost.startup;
3984 : 128228 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
3985 : :
7748 3986 : 128228 : path->jpath.path.startup_cost = startup_cost;
3987 : 128228 : path->jpath.path.total_cost = startup_cost + run_cost;
10141 scrappy@hub.org 3988 : 128228 : }
3989 : :
3990 : : /*
 3991 : : * Run mergejoinscansel() with caching of the results
3992 : : */
3993 : : static MergeScanSelCache *
5995 bruce@momjian.us 3994 : 503780 : cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
3995 : : {
3996 : : MergeScanSelCache *cache;
3997 : : ListCell *lc;
3998 : : Selectivity leftstartsel,
3999 : : leftendsel,
4000 : : rightstartsel,
4001 : : rightendsel;
4002 : : MemoryContext oldcontext;
4003 : :
4004 : : /* Do we have this result already? */
6292 tgl@sss.pgh.pa.us 4005 [ + + + + : 503801 : foreach(lc, rinfo->scansel_cache)
+ + ]
4006 : : {
4007 : 455489 : cache = (MergeScanSelCache *) lfirst(lc);
4008 [ + - ]: 455489 : if (cache->opfamily == pathkey->pk_opfamily &&
4775 4009 [ + - ]: 455489 : cache->collation == pathkey->pk_eclass->ec_collation &&
6292 4010 [ + + ]: 455489 : cache->strategy == pathkey->pk_strategy &&
4011 [ + - ]: 455468 : cache->nulls_first == pathkey->pk_nulls_first)
4012 : 455468 : return cache;
4013 : : }
4014 : :
4015 : : /* Nope, do the computation */
4016 : 48312 : mergejoinscansel(root,
4017 : 48312 : (Node *) rinfo->clause,
4018 : : pathkey->pk_opfamily,
4019 : : pathkey->pk_strategy,
4020 : 48312 : pathkey->pk_nulls_first,
4021 : : &leftstartsel,
4022 : : &leftendsel,
4023 : : &rightstartsel,
4024 : : &rightendsel);
4025 : :
4026 : : /* Cache the result in suitably long-lived workspace */
4027 : 48312 : oldcontext = MemoryContextSwitchTo(root->planner_cxt);
4028 : :
4029 : 48312 : cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
4030 : 48312 : cache->opfamily = pathkey->pk_opfamily;
4775 4031 : 48312 : cache->collation = pathkey->pk_eclass->ec_collation;
6292 4032 : 48312 : cache->strategy = pathkey->pk_strategy;
4033 : 48312 : cache->nulls_first = pathkey->pk_nulls_first;
5972 4034 : 48312 : cache->leftstartsel = leftstartsel;
4035 : 48312 : cache->leftendsel = leftendsel;
4036 : 48312 : cache->rightstartsel = rightstartsel;
4037 : 48312 : cache->rightendsel = rightendsel;
4038 : :
6292 4039 : 48312 : rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);
4040 : :
4041 : 48312 : MemoryContextSwitchTo(oldcontext);
4042 : :
4043 : 48312 : return cache;
4044 : : }
4045 : :
4046 : : /*
4047 : : * initial_cost_hashjoin
4048 : : * Preliminary estimate of the cost of a hashjoin path.
4049 : : *
4050 : : * This must quickly produce lower-bound estimates of the path's startup and
4051 : : * total costs. If we are unable to eliminate the proposed path from
4052 : : * consideration using the lower bounds, final_cost_hashjoin will be called
4053 : : * to obtain the final estimates.
4054 : : *
4055 : : * The exact division of labor between this function and final_cost_hashjoin
4056 : : * is private to them, and represents a tradeoff between speed of the initial
4057 : : * estimate and getting a tight lower bound. We choose to not examine the
4058 : : * join quals here (other than by counting the number of hash clauses),
4059 : : * so we can't do much with CPU costs. We do assume that
4060 : : * ExecChooseHashTableSize is cheap enough to use here.
4061 : : *
4062 : : * 'workspace' is to be filled with startup_cost, total_cost, and perhaps
4063 : : * other data to be used by final_cost_hashjoin
4064 : : * 'jointype' is the type of join to be performed
4065 : : * 'hashclauses' is the list of joinclauses to be used as hash clauses
4066 : : * 'outer_path' is the outer input to the join
4067 : : * 'inner_path' is the inner input to the join
4068 : : * 'extra' contains miscellaneous information about the join
4069 : : * 'parallel_hash' indicates that inner_path is partial and that a shared
4070 : : * hash table will be built in parallel
4071 : : */
4072 : : void
4461 4073 : 270053 : initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
4074 : : JoinType jointype,
4075 : : List *hashclauses,
4076 : : Path *outer_path, Path *inner_path,
4077 : : JoinPathExtraData *extra,
4078 : : bool parallel_hash)
4079 : : {
8825 4080 : 270053 : Cost startup_cost = 0;
4081 : 270053 : Cost run_cost = 0;
4461 4082 : 270053 : double outer_path_rows = outer_path->rows;
4083 : 270053 : double inner_path_rows = inner_path->rows;
2307 andres@anarazel.de 4084 : 270053 : double inner_path_rows_total = inner_path_rows;
7259 neilc@samurai.com 4085 : 270053 : int num_hashclauses = list_length(hashclauses);
4086 : : int numbuckets;
4087 : : int numbatches;
4088 : : int num_skew_mcvs;
4089 : : size_t space_allowed; /* unused */
4090 : :
4091 : : /* cost of source data */
8825 tgl@sss.pgh.pa.us 4092 : 270053 : startup_cost += outer_path->startup_cost;
4093 : 270053 : run_cost += outer_path->total_cost - outer_path->startup_cost;
4094 : 270053 : startup_cost += inner_path->total_cost;
4095 : :
4096 : : /*
4097 : : * Cost of computing hash function: must do it once per input tuple. We
4098 : : * charge one cpu_operator_cost for each column's hash function. Also,
4099 : : * tack on one cpu_tuple_cost per inner row, to model the costs of
4100 : : * inserting the row into the hashtable.
4101 : : *
4102 : : * XXX when a hashclause is more complex than a single operator, we really
4103 : : * should charge the extra eval costs of the left or right side, as
4104 : : * appropriate, here. This seems more work than it's worth at the moment.
4105 : : */
6306 4106 : 270053 : startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
4107 : 270053 : * inner_path_rows;
7748 4108 : 270053 : run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;
4109 : :
4110 : : /*
4111 : : * If this is a parallel hash build, then the value we have for
4112 : : * inner_rows_total currently refers only to the rows returned by each
4113 : : * participant. For shared hash table size estimation, we need the total
4114 : : * number, so we need to undo the division.
4115 : : */
2307 andres@anarazel.de 4116 [ + + ]: 270053 : if (parallel_hash)
4117 : 5787 : inner_path_rows_total *= get_parallel_divisor(inner_path);
4118 : :
4119 : : /*
4120 : : * Get hash table size that executor would use for inner relation.
4121 : : *
4122 : : * XXX for the moment, always assume that skew optimization will be
4123 : : * performed. As long as SKEW_HASH_MEM_PERCENT is small, it's not worth
4124 : : * trying to determine that for sure.
4125 : : *
4126 : : * XXX at some point it might be interesting to try to account for skew
4127 : : * optimization in the cost estimate, but for now, we don't.
4128 : : */
4129 : 270053 : ExecChooseHashTableSize(inner_path_rows_total,
2978 tgl@sss.pgh.pa.us 4130 : 270053 : inner_path->pathtarget->width,
4131 : : true, /* useskew */
4132 : : parallel_hash, /* try_combined_hash_mem */
4133 : : outer_path->parallel_workers,
4134 : : &space_allowed,
4135 : : &numbuckets,
4136 : : &numbatches,
4137 : : &num_skew_mcvs);
4138 : :
4139 : : /*
4140 : : * If inner relation is too big then we will need to "batch" the join,
4141 : : * which implies writing and reading most of the tuples to disk an extra
4142 : : * time. Charge seq_page_cost per page, since the I/O should be nice and
4143 : : * sequential. Writing the inner rel counts as startup cost, all the rest
4144 : : * as run cost.
4145 : : */
4461 4146 [ + + ]: 270053 : if (numbatches > 1)
4147 : : {
4148 : 2270 : double outerpages = page_size(outer_path_rows,
2978 4149 : 2270 : outer_path->pathtarget->width);
4461 4150 : 2270 : double innerpages = page_size(inner_path_rows,
2978 4151 : 2270 : inner_path->pathtarget->width);
4152 : :
4461 4153 : 2270 : startup_cost += seq_page_cost * innerpages;
4154 : 2270 : run_cost += seq_page_cost * (innerpages + 2 * outerpages);
4155 : : }
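 : :
 : : 	/*
 : : 	 * Worked example (page counts assumed): with innerpages = 100 and
 : : 	 * outerpages = 500 at the default seq_page_cost of 1.0, batching adds
 : : 	 * 100 to startup cost (writing the inner rel) and 100 + 2 * 500 = 1100
 : : 	 * to run cost (re-reading the inner rel, plus writing and re-reading
 : : 	 * the outer rel).
 : : 	 */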
4156 : :
4157 : : /* CPU costs left for later */
4158 : :
4159 : : /* Public result fields */
4160 : 270053 : workspace->startup_cost = startup_cost;
4161 : 270053 : workspace->total_cost = startup_cost + run_cost;
4162 : : /* Save private data for final_cost_hashjoin */
4163 : 270053 : workspace->run_cost = run_cost;
4164 : 270053 : workspace->numbuckets = numbuckets;
4165 : 270053 : workspace->numbatches = numbatches;
2307 andres@anarazel.de 4166 : 270053 : workspace->inner_rows_total = inner_path_rows_total;
4461 tgl@sss.pgh.pa.us 4167 : 270053 : }
4168 : :
4169 : : /*
4170 : : * final_cost_hashjoin
4171 : : * Final estimate of the cost and result size of a hashjoin path.
4172 : : *
4173 : : * Note: the numbatches estimate is also saved into 'path' for use later
4174 : : *
4175 : : * 'path' is already filled in except for the rows and cost fields and
4176 : : * num_batches
4177 : : * 'workspace' is the result from initial_cost_hashjoin
4178 : : * 'extra' contains miscellaneous information about the join
4179 : : */
4180 : : void
4181 : 112112 : final_cost_hashjoin(PlannerInfo *root, HashPath *path,
4182 : : JoinCostWorkspace *workspace,
4183 : : JoinPathExtraData *extra)
4184 : : {
4185 : 112112 : Path *outer_path = path->jpath.outerjoinpath;
4186 : 112112 : Path *inner_path = path->jpath.innerjoinpath;
4187 : 112112 : double outer_path_rows = outer_path->rows;
4188 : 112112 : double inner_path_rows = inner_path->rows;
2307 andres@anarazel.de 4189 : 112112 : double inner_path_rows_total = workspace->inner_rows_total;
4461 tgl@sss.pgh.pa.us 4190 : 112112 : List *hashclauses = path->path_hashclauses;
4191 : 112112 : Cost startup_cost = workspace->startup_cost;
4192 : 112112 : Cost run_cost = workspace->run_cost;
4193 : 112112 : int numbuckets = workspace->numbuckets;
4194 : 112112 : int numbatches = workspace->numbatches;
4195 : : Cost cpu_per_tuple;
4196 : : QualCost hash_qual_cost;
4197 : : QualCost qp_qual_cost;
4198 : : double hashjointuples;
4199 : : double virtualbuckets;
4200 : : Selectivity innerbucketsize;
4201 : : Selectivity innermcvfreq;
4202 : : ListCell *hcl;
4203 : :
4204 : : /* Mark the path with the correct row estimate */
4378 4205 [ + + ]: 112112 : if (path->jpath.path.param_info)
4206 : 640 : path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
4207 : : else
4208 : 111472 : path->jpath.path.rows = path->jpath.path.parent->rows;
4209 : :
4210 : : /* For partial paths, scale row estimate. */
2648 rhaas@postgresql.org 4211 [ + + ]: 112112 : if (path->jpath.path.parallel_workers > 0)
4212 : : {
2524 bruce@momjian.us 4213 : 5286 : double parallel_divisor = get_parallel_divisor(&path->jpath.path);
4214 : :
2587 rhaas@postgresql.org 4215 : 5286 : path->jpath.path.rows =
4216 : 5286 : clamp_row_est(path->jpath.path.rows / parallel_divisor);
4217 : : }
4218 : :
4219 : : /*
4220 : : * We could include disable_cost in the preliminary estimate, but that
4221 : : * would amount to optimizing for the case where the join method is
4222 : : * disabled, which doesn't seem like the way to bet.
4223 : : */
4461 tgl@sss.pgh.pa.us 4224 [ + + ]: 112112 : if (!enable_hashjoin)
4225 : 102 : startup_cost += disable_cost;
4226 : :
4227 : : /* mark the path with estimated # of batches */
5498 4228 : 112112 : path->num_batches = numbatches;
4229 : :
4230 : : /* store the total number of tuples (sum of partial row estimates) */
2307 andres@anarazel.de 4231 : 112112 : path->inner_rows_total = inner_path_rows_total;
4232 : :
4233 : : /* and compute the number of "virtual" buckets in the whole join */
2489 tgl@sss.pgh.pa.us 4234 : 112112 : virtualbuckets = (double) numbuckets * (double) numbatches;
4235 : :
4236 : : /*
4237 : : * Determine bucketsize fraction and MCV frequency for the inner relation.
4238 : : * We use the smallest bucketsize or MCV frequency estimated for any
4239 : : * individual hashclause; this is undoubtedly conservative.
4240 : : *
4241 : : * BUT: if inner relation has been unique-ified, we can assume it's good
4242 : : * for hashing. This is important both because it's the right answer, and
4243 : : * because we avoid contaminating the cache with a value that's wrong for
4244 : : * non-unique-ified paths.
4245 : : */
7747 4246 [ + + ]: 112112 : if (IsA(inner_path, UniquePath))
4247 : : {
4248 : 1024 : innerbucketsize = 1.0 / virtualbuckets;
2434 4249 : 1024 : innermcvfreq = 0.0;
4250 : : }
4251 : : else
4252 : : {
7747 4253 : 111088 : innerbucketsize = 1.0;
2434 4254 : 111088 : innermcvfreq = 1.0;
7747 4255 [ + - + + : 233409 : foreach(hcl, hashclauses)
+ + ]
4256 : : {
2561 4257 : 122321 : RestrictInfo *restrictinfo = lfirst_node(RestrictInfo, hcl);
4258 : : Selectivity thisbucketsize;
4259 : : Selectivity thismcvfreq;
4260 : :
4261 : : /*
4262 : : * First we have to figure out which side of the hashjoin clause
4263 : : * is the inner side.
4264 : : *
4265 : : * Since we tend to visit the same clauses over and over when
4266 : : * planning a large query, we cache the bucket stats estimates in
4267 : : * the RestrictInfo node to avoid repeated lookups of statistics.
4268 : : */
7736 4269 [ + + ]: 122321 : if (bms_is_subset(restrictinfo->right_relids,
4270 : 122321 : inner_path->parent->relids))
4271 : : {
4272 : : /* righthand side is inner */
7747 4273 : 65500 : thisbucketsize = restrictinfo->right_bucketsize;
4274 [ + + ]: 65500 : if (thisbucketsize < 0)
4275 : : {
4276 : : /* not cached yet */
2434 4277 : 35575 : estimate_hash_bucket_stats(root,
4278 : 35575 : get_rightop(restrictinfo->clause),
4279 : : virtualbuckets,
4280 : : &restrictinfo->right_mcvfreq,
4281 : : &restrictinfo->right_bucketsize);
4282 : 35575 : thisbucketsize = restrictinfo->right_bucketsize;
4283 : : }
4284 : 65500 : thismcvfreq = restrictinfo->right_mcvfreq;
4285 : : }
4286 : : else
4287 : : {
7736 4288 [ - + ]: 56821 : Assert(bms_is_subset(restrictinfo->left_relids,
4289 : : inner_path->parent->relids));
4290 : : /* lefthand side is inner */
7747 4291 : 56821 : thisbucketsize = restrictinfo->left_bucketsize;
4292 [ + + ]: 56821 : if (thisbucketsize < 0)
4293 : : {
4294 : : /* not cached yet */
2434 4295 : 30330 : estimate_hash_bucket_stats(root,
4296 : 30330 : get_leftop(restrictinfo->clause),
4297 : : virtualbuckets,
4298 : : &restrictinfo->left_mcvfreq,
4299 : : &restrictinfo->left_bucketsize);
4300 : 30330 : thisbucketsize = restrictinfo->left_bucketsize;
4301 : : }
4302 : 56821 : thismcvfreq = restrictinfo->left_mcvfreq;
4303 : : }
4304 : :
7747 4305 [ + + ]: 122321 : if (innerbucketsize > thisbucketsize)
4306 : 80872 : innerbucketsize = thisbucketsize;
2434 4307 [ + + ]: 122321 : if (innermcvfreq > thismcvfreq)
4308 : 113535 : innermcvfreq = thismcvfreq;
4309 : : }
4310 : : }
4311 : :
4312 : : /*
4313 : : * If the bucket holding the inner MCV would exceed hash_mem, we don't
4314 : : * want to hash unless there is really no other alternative, so apply
4315 : : * disable_cost. (The executor normally copes with excessive memory usage
4316 : : * by splitting batches, but obviously it cannot separate equal values
4317 : : * that way, so it will be unable to drive the batch size below hash_mem
4318 : : * when this is true.)
4319 : : */
4320 : 112112 : if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
994 4321 [ - + ]: 224224 : inner_path->pathtarget->width) > get_hash_memory_limit())
2434 tgl@sss.pgh.pa.us 4322 :UBC 0 : startup_cost += disable_cost;
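 : :
 : : 	/*
 : : 	 * E.g. (figures assumed): at inner_path_rows = 1000000 and
 : : 	 * innermcvfreq = 0.2, some 200000 duplicate tuples must share one
 : : 	 * bucket no matter how many batches are used; if they don't fit in
 : : 	 * hash_mem, the path is effectively disabled.
 : : 	 */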
4323 : :
4324 : : /*
4325 : : * Compute cost of the hashquals and qpquals (other restriction clauses)
4326 : : * separately.
4327 : : */
4461 tgl@sss.pgh.pa.us 4328 :CBC 112112 : cost_qual_eval(&hash_qual_cost, hashclauses, root);
4329 : 112112 : cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
4330 : 112112 : qp_qual_cost.startup -= hash_qual_cost.startup;
4331 : 112112 : qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;
4332 : :
4333 : : /* CPU costs */
4334 : :
2564 4335 [ + + ]: 112112 : if (path->jpath.jointype == JOIN_SEMI ||
4336 [ + + ]: 110647 : path->jpath.jointype == JOIN_ANTI ||
4337 [ + + ]: 108559 : extra->inner_unique)
5454 4338 : 47809 : {
4339 : : double outer_matched_rows;
4340 : : Selectivity inner_scan_frac;
4341 : :
4342 : : /*
4343 : : * With a SEMI or ANTI join, or if the innerrel is known unique, the
4344 : : * executor will stop after the first match.
4345 : : *
4346 : : * For an outer-rel row that has at least one match, we can expect the
4347 : : * bucket scan to stop after a fraction 1/(match_count+1) of the
4348 : : * bucket's rows, if the matches are evenly distributed. Since they
4349 : : * probably aren't quite evenly distributed, we apply a fuzz factor of
4350 : : * 2.0 to that fraction. (If we used a larger fuzz factor, we'd have
4351 : : * to clamp inner_scan_frac to at most 1.0; but since match_count is
4352 : : * at least 1, no such clamp is needed now.)
4353 : : */
2564 4354 : 47809 : outer_matched_rows = rint(outer_path_rows * extra->semifactors.outer_match_frac);
4355 : 47809 : inner_scan_frac = 2.0 / (extra->semifactors.match_count + 1.0);
4356 : :
5454 4357 : 47809 : startup_cost += hash_qual_cost.startup;
4358 : 95618 : run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
4359 : 47809 : clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;
4360 : :
4361 : : /*
4362 : : * For unmatched outer-rel rows, the picture is quite a lot different.
4363 : : * In the first place, there is no reason to assume that these rows
4364 : : * preferentially hit heavily-populated buckets; instead assume they
4365 : : * are uncorrelated with the inner distribution and so they see an
4366 : : * average bucket size of inner_path_rows / virtualbuckets. In the
4367 : : * second place, it seems likely that they will have few if any exact
4368 : : * hash-code matches and so very few of the tuples in the bucket will
4369 : : * actually require eval of the hash quals. We don't have any good
4370 : : * way to estimate how many will, but for the moment assume that the
4371 : : * effective cost per bucket entry is one-tenth what it is for
4372 : : * matchable tuples.
4373 : : */
4374 : 95618 : run_cost += hash_qual_cost.per_tuple *
4375 : 95618 : (outer_path_rows - outer_matched_rows) *
4376 : 47809 : clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;
4377 : :
4378 : : /* Get # of tuples that will pass the basic join */
2101 4379 [ + + ]: 47809 : if (path->jpath.jointype == JOIN_ANTI)
5454 4380 : 2088 : hashjointuples = outer_path_rows - outer_matched_rows;
4381 : : else
2101 4382 : 45721 : hashjointuples = outer_matched_rows;
4383 : : }
4384 : : else
4385 : : {
4386 : : /*
4387 : : * The number of tuple comparisons needed is the number of outer
4388 : : * tuples times the typical number of tuples in a hash bucket, which
4389 : : * is the inner relation size times its bucketsize fraction. At each
4390 : : * one, we need to evaluate the hashjoin quals. But actually,
4391 : : * charging the full qual eval cost at each tuple is pessimistic,
4392 : : * since we don't evaluate the quals unless the hash values match
4393 : : * exactly. For lack of a better idea, halve the cost estimate to
4394 : : * allow for that.
4395 : : */
5454 4396 : 64303 : startup_cost += hash_qual_cost.startup;
4397 : 128606 : run_cost += hash_qual_cost.per_tuple * outer_path_rows *
4398 : 64303 : clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;
4399 : :
4400 : : /*
4401 : : * Get approx # tuples passing the hashquals. We use
4402 : : * approx_tuple_count here because we need an estimate done with
4403 : : * JOIN_INNER semantics.
4404 : : */
4405 : 64303 : hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
4406 : : }
4407 : :
4408 : : /*
4409 : : * For each tuple that gets through the hashjoin proper, we charge
4410 : : * cpu_tuple_cost plus the cost of evaluating additional restriction
4411 : : * clauses that are to be applied at the join. (This is pessimistic since
4412 : : * not all of the quals may get evaluated at each tuple.)
4413 : : */
7748 4414 : 112112 : startup_cost += qp_qual_cost.startup;
4415 : 112112 : cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
5720 4416 : 112112 : run_cost += cpu_per_tuple * hashjointuples;
4417 : :
4418 : : /* tlist eval costs are paid per output row, not per tuple scanned */
2978 4419 : 112112 : startup_cost += path->jpath.path.pathtarget->cost.startup;
4420 : 112112 : run_cost += path->jpath.path.pathtarget->cost.per_tuple * path->jpath.path.rows;
4421 : :
7748 4422 : 112112 : path->jpath.path.startup_cost = startup_cost;
4423 : 112112 : path->jpath.path.total_cost = startup_cost + run_cost;
8825 4424 : 112112 : }
4425 : :
4426 : :
4427 : : /*
4428 : : * cost_subplan
4429 : : * Figure the costs for a SubPlan (or initplan).
4430 : : *
4431 : : * Note: we could dig the subplan's Plan out of the root list, but in practice
4432 : : * all callers have it handy already, so we make them pass it.
4433 : : */
4434 : : void
5714 4435 : 18475 : cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
4436 : : {
4437 : : QualCost sp_cost;
4438 : :
4439 : : /* Figure any cost for evaluating the testexpr */
4440 : 18475 : cost_qual_eval(&sp_cost,
4441 : 18475 : make_ands_implicit((Expr *) subplan->testexpr),
4442 : : root);
4443 : :
4444 [ + + ]: 18475 : if (subplan->useHashTable)
4445 : : {
4446 : : /*
4447 : : * If we are using a hash table for the subquery outputs, then the
4448 : : * cost of evaluating the query is a one-time cost. We charge one
4449 : : * cpu_operator_cost per tuple for the work of loading the hashtable,
4450 : : * too.
4451 : : */
4452 : 946 : sp_cost.startup += plan->total_cost +
4453 : 946 : cpu_operator_cost * plan->plan_rows;
4454 : :
4455 : : /*
4456 : : * The per-tuple costs include the cost of evaluating the lefthand
4457 : : * expressions, plus the cost of probing the hashtable. We already
4458 : : * accounted for the lefthand expressions as part of the testexpr, and
4459 : : * will also have counted one cpu_operator_cost for each comparison
4460 : : * operator. That is probably too low for the probing cost, but it's
4461 : : * hard to make a better estimate, so live with it for now.
4462 : : */
4463 : : }
4464 : : else
4465 : : {
4466 : : /*
4467 : : * Otherwise we will be rescanning the subplan output on each
4468 : : * evaluation. We need to estimate how much of the output we will
4469 : : * actually need to scan. NOTE: this logic should agree with the
4470 : : * tuple_fraction estimates used by make_subplan() in
4471 : : * plan/subselect.c.
4472 : : */
4473 : 17529 : Cost plan_run_cost = plan->total_cost - plan->startup_cost;
4474 : :
4475 [ + + ]: 17529 : if (subplan->subLinkType == EXISTS_SUBLINK)
4476 : : {
4477 : : /* we only need to fetch 1 tuple; clamp to avoid zero divide */
2941 4478 : 1032 : sp_cost.per_tuple += plan_run_cost / clamp_row_est(plan->plan_rows);
4479 : : }
5714 4480 [ + + ]: 16497 : else if (subplan->subLinkType == ALL_SUBLINK ||
4481 [ + + ]: 16488 : subplan->subLinkType == ANY_SUBLINK)
4482 : : {
4483 : : /* assume we need 50% of the tuples */
4484 : 50 : sp_cost.per_tuple += 0.50 * plan_run_cost;
4485 : : /* also charge a cpu_operator_cost per row examined */
4486 : 50 : sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
4487 : : }
4488 : : else
4489 : : {
4490 : : /* assume we need all tuples */
4491 : 16447 : sp_cost.per_tuple += plan_run_cost;
4492 : : }
4493 : :
4494 : : /*
4495 : : * Also account for subplan's startup cost. If the subplan is
4496 : : * uncorrelated or undirect correlated, AND its topmost node is one
4497 : : * that materializes its output, assume that we'll only need to pay
4498 : : * its startup cost once; otherwise assume we pay the startup cost
4499 : : * every time.
4500 : : */
4501 [ + + + + ]: 23628 : if (subplan->parParam == NIL &&
5328 4502 : 6099 : ExecMaterializesOutput(nodeTag(plan)))
5714 4503 : 769 : sp_cost.startup += plan->startup_cost;
4504 : : else
4505 : 16760 : sp_cost.per_tuple += plan->startup_cost;
4506 : : }
4507 : :
4508 : 18475 : subplan->startup_cost = sp_cost.startup;
4509 : 18475 : subplan->per_call_cost = sp_cost.per_tuple;
4510 : 18475 : }
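 : :
 : : /*
 : :  * For instance (values assumed): a hashed subplan whose plan has
 : :  * total_cost = 100 and plan_rows = 1000 adds 100 + 0.0025 * 1000 = 102.5
 : :  * of one-time startup cost at the default cpu_operator_cost; the same
 : :  * subplan used as a non-hashed EXISTS sublink would instead add
 : :  * plan_run_cost / 1000 to the per-call cost, since only one tuple need
 : :  * be fetched per evaluation.
 : :  */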
4511 : :
4512 : :
4513 : : /*
4514 : : * cost_rescan
4515 : : * Given a finished Path, estimate the costs of rescanning it after
4516 : : * having done so the first time. For some Path types a rescan is
4517 : : * cheaper than an original scan (if no parameters change), and this
4518 : : * function embodies knowledge about that. The default is to return
4519 : : * the same costs stored in the Path. (Note that the cost estimates
4520 : : * actually stored in Paths are always for first scans.)
4521 : : *
4522 : : * This function is not currently intended to model effects such as rescans
4523 : : * being cheaper due to disk block caching; what we are concerned with is
4524 : : * plan types wherein the executor caches results explicitly, or doesn't
4525 : : * redo startup calculations, etc.
4526 : : */
4527 : : static void
5328 4528 : 1129791 : cost_rescan(PlannerInfo *root, Path *path,
4529 : : Cost *rescan_startup_cost, /* output parameters */
4530 : : Cost *rescan_total_cost)
4531 : : {
4532 [ + + + + : 1129791 : switch (path->pathtype)
+ + ]
4533 : : {
4534 : 20152 : case T_FunctionScan:
4535 : :
4536 : : /*
4537 : : * Currently, nodeFunctionscan.c always executes the function to
4538 : : * completion before returning any rows, and caches the results in
4539 : : * a tuplestore. So the function eval cost is all startup cost
4540 : : * and isn't paid over again on rescans. However, all run costs
4541 : : * will be paid over again.
4542 : : */
4543 : 20152 : *rescan_startup_cost = 0;
4544 : 20152 : *rescan_total_cost = path->total_cost - path->startup_cost;
4545 : 20152 : break;
4546 : 50082 : case T_HashJoin:
4547 : :
4548 : : /*
4549 : : * If it's a single-batch join, we don't need to rebuild the hash
4550 : : * table during a rescan.
4551 : : */
2818 4552 [ + - ]: 50082 : if (((HashPath *) path)->num_batches == 1)
4553 : : {
4554 : : /* Startup cost is exactly the cost of hash table building */
4555 : 50082 : *rescan_startup_cost = 0;
4556 : 50082 : *rescan_total_cost = path->total_cost - path->startup_cost;
4557 : : }
4558 : : else
4559 : : {
4560 : : /* Otherwise, no special treatment */
2818 tgl@sss.pgh.pa.us 4561 :UBC 0 : *rescan_startup_cost = path->startup_cost;
4562 : 0 : *rescan_total_cost = path->total_cost;
4563 : : }
5328 tgl@sss.pgh.pa.us 4564 :CBC 50082 : break;
4565 : 3285 : case T_CteScan:
4566 : : case T_WorkTableScan:
4567 : : {
4568 : : /*
4569 : : * These plan types materialize their final result in a
4570 : : * tuplestore or tuplesort object. So the rescan cost is only
4571 : : * cpu_tuple_cost per tuple, unless the result is large enough
4572 : : * to spill to disk.
4573 : : */
4461 4574 : 3285 : Cost run_cost = cpu_tuple_cost * path->rows;
4575 : 3285 : double nbytes = relation_byte_size(path->rows,
2489 4576 : 3285 : path->pathtarget->width);
5161 bruce@momjian.us 4577 : 3285 : long work_mem_bytes = work_mem * 1024L;
4578 : :
5328 tgl@sss.pgh.pa.us 4579 [ + + ]: 3285 : if (nbytes > work_mem_bytes)
4580 : : {
4581 : : /* It will spill, so account for re-read cost */
4582 : 80 : double npages = ceil(nbytes / BLCKSZ);
4583 : :
4584 : 80 : run_cost += seq_page_cost * npages;
4585 : : }
4586 : 3285 : *rescan_startup_cost = 0;
4587 : 3285 : *rescan_total_cost = run_cost;
4588 : : }
4589 : 3285 : break;
5168 4590 : 382439 : case T_Material:
4591 : : case T_Sort:
4592 : : {
4593 : : /*
4594 : : * These plan types not only materialize their results, but do
4595 : : * not implement qual filtering or projection. So they are
4596 : : * even cheaper to rescan than the ones above. We charge only
4597 : : * cpu_operator_cost per tuple. (Note: keep that in sync with
4598 : : * the run_cost charge in cost_sort, and also see comments in
4599 : : * cost_material before you change it.)
4600 : : */
4461 4601 : 382439 : Cost run_cost = cpu_operator_cost * path->rows;
4602 : 382439 : double nbytes = relation_byte_size(path->rows,
2489 4603 : 382439 : path->pathtarget->width);
5161 bruce@momjian.us 4604 : 382439 : long work_mem_bytes = work_mem * 1024L;
4605 : :
5168 tgl@sss.pgh.pa.us 4606 [ + + ]: 382439 : if (nbytes > work_mem_bytes)
4607 : : {
4608 : : /* It will spill, so account for re-read cost */
4609 : 4536 : double npages = ceil(nbytes / BLCKSZ);
4610 : :
4611 : 4536 : run_cost += seq_page_cost * npages;
4612 : : }
4613 : 382439 : *rescan_startup_cost = 0;
4614 : 382439 : *rescan_total_cost = run_cost;
4615 : : }
4616 : 382439 : break;
1005 drowley@postgresql.o 4617 : 112582 : case T_Memoize:
4618 : : /* All the hard work is done by cost_memoize_rescan */
4619 : 112582 : cost_memoize_rescan(root, (MemoizePath *) path,
4620 : : rescan_startup_cost, rescan_total_cost);
1108 4621 : 112582 : break;
5328 tgl@sss.pgh.pa.us 4622 : 561251 : default:
4623 : 561251 : *rescan_startup_cost = path->startup_cost;
4624 : 561251 : *rescan_total_cost = path->total_cost;
4625 : 561251 : break;
4626 : : }
4627 : 1129791 : }
4628 : :
4629 : :
4630 : : /*
4631 : : * cost_qual_eval
4632 : : * Estimate the CPU costs of evaluating a WHERE clause.
4633 : : * The input can be either an implicitly-ANDed list of boolean
4634 : : * expressions, or a list of RestrictInfo nodes. (The latter is
4635 : : * preferred since it allows caching of the results.)
4636 : : * The result includes both a one-time (startup) component,
4637 : : * and a per-evaluation component.
4638 : : */
4639 : : void
6261 4640 : 1606015 : cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
4641 : : {
4642 : : cost_qual_eval_context context;
4643 : : ListCell *l;
4644 : :
4645 : 1606015 : context.root = root;
4646 : 1606015 : context.total.startup = 0;
4647 : 1606015 : context.total.per_tuple = 0;
4648 : :
4649 : : /* We don't charge any cost for the implicit ANDing at top level ... */
4650 : :
8524 4651 [ + + + + : 2997937 : foreach(l, quals)
+ + ]
4652 : : {
8424 bruce@momjian.us 4653 : 1391922 : Node *qual = (Node *) lfirst(l);
4654 : :
6261 tgl@sss.pgh.pa.us 4655 : 1391922 : cost_qual_eval_walker(qual, &context);
4656 : : }
4657 : :
4658 : 1606015 : *cost = context.total;
8825 4659 : 1606015 : }
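 : :
 : : /*
 : :  * Typical caller pattern, sketched for illustration (compare the scan
 : :  * costing functions such as cost_seqscan):
 : :  *
 : :  *		QualCost	qpqual_cost;
 : :  *
 : :  *		cost_qual_eval(&qpqual_cost, rel->baserestrictinfo, root);
 : :  *		startup_cost += qpqual_cost.startup;
 : :  *		run_cost += (cpu_tuple_cost + qpqual_cost.per_tuple) * rel->tuples;
 : :  */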
4660 : :
4661 : : /*
4662 : : * cost_qual_eval_node
4663 : : * As above, for a single RestrictInfo or expression.
4664 : : */
4665 : : void
6261 4666 : 758429 : cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
4667 : : {
4668 : : cost_qual_eval_context context;
4669 : :
4670 : 758429 : context.root = root;
4671 : 758429 : context.total.startup = 0;
4672 : 758429 : context.total.per_tuple = 0;
4673 : :
4674 : 758429 : cost_qual_eval_walker(qual, &context);
4675 : :
4676 : 758429 : *cost = context.total;
6292 4677 : 758429 : }
4678 : :
4679 : : static bool
5995 bruce@momjian.us 4680 : 3633469 : cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
4681 : : {
8825 tgl@sss.pgh.pa.us 4682 [ + + ]: 3633469 : if (node == NULL)
4683 : 46087 : return false;
4684 : :
4685 : : /*
4686 : : * RestrictInfo nodes contain an eval_cost field reserved for this
4687 : : * routine's use, so that it's not necessary to evaluate the qual clause's
4688 : : * cost more than once. If the clause's cost hasn't been computed yet,
4689 : : * the field's startup value will contain -1.
4690 : : */
6292 4691 [ + + ]: 3587382 : if (IsA(node, RestrictInfo))
4692 : : {
4693 : 1460694 : RestrictInfo *rinfo = (RestrictInfo *) node;
4694 : :
4695 [ + + ]: 1460694 : if (rinfo->eval_cost.startup < 0)
4696 : : {
4697 : : cost_qual_eval_context locContext;
4698 : :
6261 4699 : 243906 : locContext.root = context->root;
4700 : 243906 : locContext.total.startup = 0;
4701 : 243906 : locContext.total.per_tuple = 0;
4702 : :
4703 : : /*
4704 : : * For an OR clause, recurse into the marked-up tree so that we
4705 : : * set the eval_cost for contained RestrictInfos too.
4706 : : */
6292 4707 [ + + ]: 243906 : if (rinfo->orclause)
6261 4708 : 3921 : cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
4709 : : else
4710 : 239985 : cost_qual_eval_walker((Node *) rinfo->clause, &locContext);
4711 : :
4712 : : /*
4713 : : * If the RestrictInfo is marked pseudoconstant, it will be tested
4714 : : * only once, so treat its cost as all startup cost.
4715 : : */
6292 4716 [ + + ]: 243906 : if (rinfo->pseudoconstant)
4717 : : {
4718 : : /* count one execution during startup */
6261 4719 : 4234 : locContext.total.startup += locContext.total.per_tuple;
4720 : 4234 : locContext.total.per_tuple = 0;
4721 : : }
4722 : 243906 : rinfo->eval_cost = locContext.total;
4723 : : }
4724 : 1460694 : context->total.startup += rinfo->eval_cost.startup;
4725 : 1460694 : context->total.per_tuple += rinfo->eval_cost.per_tuple;
4726 : : /* do NOT recurse into children */
6292 4727 : 1460694 : return false;
4728 : : }
4729 : :
4730 : : /*
4731 : : * For each operator or function node in the given tree, we charge the
4732 : : * estimated execution cost given by pg_proc.procost (remember to multiply
4733 : : * this by cpu_operator_cost).
4734 : : *
4735 : : * Vars and Consts are charged zero, and so are boolean operators (AND,
4736 : : * OR, NOT). Simplistic, but a lot better than no model at all.
4737 : : *
4738 : : * Should we try to account for the possibility of short-circuit
4739 : : * evaluation of AND/OR? Probably *not*, because that would make the
4740 : : * results depend on the clause ordering, and we are not in any position
4741 : : * to expect that the current ordering of the clauses is the one that's
4742 : : * going to end up being used. The above per-RestrictInfo caching would
4743 : : * not mix well with trying to re-order clauses anyway.
4744 : : *
4745 : : * Another issue that is entirely ignored here is that if a set-returning
4746 : : * function is below top level in the tree, the functions/operators above
4747 : : * it will need to be evaluated multiple times. In practical use, such
4748 : : * cases arise so seldom as to not be worth the added complexity needed;
4749 : : * moreover, since our rowcount estimates for functions tend to be pretty
4750 : : * phony, the results would also be pretty phony.
4751 : : */
4752 [ + + ]: 2126688 : if (IsA(node, FuncExpr))
4753 : : {
1891 4754 : 150591 : add_function_cost(context->root, ((FuncExpr *) node)->funcid, node,
4755 : : &context->total);
4756 : : }
6292 4757 [ + + ]: 1976097 : else if (IsA(node, OpExpr) ||
4758 [ + + ]: 1691608 : IsA(node, DistinctExpr) ||
4759 [ + + ]: 1691174 : IsA(node, NullIfExpr))
4760 : : {
4761 : : /* rely on struct equivalence to treat these all alike */
4762 : 284975 : set_opfuncid((OpExpr *) node);
1891 4763 : 284975 : add_function_cost(context->root, ((OpExpr *) node)->opfuncid, node,
4764 : : &context->total);
4765 : : }
7595 4766 [ + + ]: 1691122 : else if (IsA(node, ScalarArrayOpExpr))
4767 : : {
6714 4768 : 18473 : ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
6402 bruce@momjian.us 4769 : 18473 : Node *arraynode = (Node *) lsecond(saop->args);
4770 : : QualCost sacosts;
4771 : : QualCost hcosts;
101 tgl@sss.pgh.pa.us 4772 :GNC 18473 : double estarraylen = estimate_array_length(context->root, arraynode);
4773 : :
6292 tgl@sss.pgh.pa.us 4774 :CBC 18473 : set_sa_opfuncid(saop);
1891 4775 : 18473 : sacosts.startup = sacosts.per_tuple = 0;
4776 : 18473 : add_function_cost(context->root, saop->opfuncid, NULL,
4777 : : &sacosts);
4778 : :
1102 drowley@postgresql.o 4779 [ + + ]: 18473 : if (OidIsValid(saop->hashfuncid))
4780 : : {
4781 : : /* Handle costs for hashed ScalarArrayOpExpr */
4782 : 137 : hcosts.startup = hcosts.per_tuple = 0;
4783 : :
4784 : 137 : add_function_cost(context->root, saop->hashfuncid, NULL, &hcosts);
4785 : 137 : context->total.startup += sacosts.startup + hcosts.startup;
4786 : :
4787 : : /* Estimate the cost of building the hashtable. */
4788 : 137 : context->total.startup += estarraylen * hcosts.per_tuple;
4789 : :
4790 : : /*
4791 : : * XXX should we charge a little bit for sacosts.per_tuple when
4792 : : * building the table, or is it ok to assume there will be zero
4793 : : * hash collision?
4794 : : */
4795 : :
4796 : : /*
4797 : : * Charge for hashtable lookups. Charge a single hash and a
4798 : : * single comparison.
4799 : : */
4800 : 137 : context->total.per_tuple += hcosts.per_tuple + sacosts.per_tuple;
4801 : : }
4802 : : else
4803 : : {
4804 : : /*
4805 : : * Estimate that the operator will be applied to about half of the
4806 : : * array elements before the answer is determined.
4807 : : */
4808 : 18336 : context->total.startup += sacosts.startup;
4809 : 36672 : context->total.per_tuple += sacosts.per_tuple *
101 tgl@sss.pgh.pa.us 4810 :GNC 18336 : estimate_array_length(context->root, arraynode) * 0.5;
4811 : : }
4812 : : }
4739 tgl@sss.pgh.pa.us 4813 [ + + ]:CBC 1672649 : else if (IsA(node, Aggref) ||
4814 [ + + ]: 1646867 : IsA(node, WindowFunc))
4815 : : {
4816 : : /*
4817 : : * Aggref and WindowFunc nodes are (and should be) treated like Vars,
4818 : : * ie, zero execution cost in the current model, because they behave
4819 : : * essentially like Vars at execution. We disregard the costs of
4820 : : * their input expressions for the same reason. The actual execution
4821 : : * costs of the aggregate/window functions and their arguments have to
4822 : : * be factored into plan-node-specific costing of the Agg or WindowAgg
4823 : : * plan node.
4824 : : */
4825 : 27457 : return false; /* don't recurse into children */
4826 : : }
755 4827 [ + + ]: 1645192 : else if (IsA(node, GroupingFunc))
4828 : : {
4829 : : /* Treat this as having cost 1 */
4830 : 175 : context->total.per_tuple += cpu_operator_cost;
4831 : 175 : return false; /* don't recurse into children */
4832 : : }
6158 4833 [ + + ]: 1645017 : else if (IsA(node, CoerceViaIO))
4834 : : {
4835 : 9600 : CoerceViaIO *iocoerce = (CoerceViaIO *) node;
4836 : : Oid iofunc;
4837 : : Oid typioparam;
4838 : : bool typisvarlena;
4839 : :
4840 : : /* check the result type's input function */
4841 : 9600 : getTypeInputInfo(iocoerce->resulttype,
4842 : : &iofunc, &typioparam);
1891 4843 : 9600 : add_function_cost(context->root, iofunc, NULL,
4844 : : &context->total);
4845 : : /* check the input type's output function */
6158 4846 : 9600 : getTypeOutputInfo(exprType((Node *) iocoerce->arg),
4847 : : &iofunc, &typisvarlena);
1891 4848 : 9600 : add_function_cost(context->root, iofunc, NULL,
4849 : : &context->total);
4850 : : }
6228 4851 [ + + ]: 1635417 : else if (IsA(node, ArrayCoerceExpr))
4852 : : {
4853 : 2223 : ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
4854 : : QualCost perelemcost;
4855 : :
2388 4856 : 2223 : cost_qual_eval_node(&perelemcost, (Node *) acoerce->elemexpr,
4857 : : context->root);
4858 : 2223 : context->total.startup += perelemcost.startup;
4859 [ + + ]: 2223 : if (perelemcost.per_tuple > 0)
4860 : 29 : context->total.per_tuple += perelemcost.per_tuple *
101 tgl@sss.pgh.pa.us 4861 :GNC 29 : estimate_array_length(context->root, (Node *) acoerce->arg);
4862 : : }
6682 tgl@sss.pgh.pa.us 4863 [ + + ]:CBC 1633194 : else if (IsA(node, RowCompareExpr))
4864 : : {
4865 : : /* Conservatively assume we will check all the columns */
4866 : 78 : RowCompareExpr *rcexpr = (RowCompareExpr *) node;
4867 : : ListCell *lc;
4868 : :
6292 4869 [ + - + + : 261 : foreach(lc, rcexpr->opnos)
+ + ]
4870 : : {
5995 bruce@momjian.us 4871 : 183 : Oid opid = lfirst_oid(lc);
4872 : :
1891 tgl@sss.pgh.pa.us 4873 : 183 : add_function_cost(context->root, get_opcode(opid), NULL,
4874 : : &context->total);
4875 : : }
4876 : : }
2466 4877 [ + + ]: 1633116 : else if (IsA(node, MinMaxExpr) ||
333 michael@paquier.xyz 4878 [ + + ]: 1633024 : IsA(node, SQLValueFunction) ||
2466 tgl@sss.pgh.pa.us 4879 [ + + ]: 1630833 : IsA(node, XmlExpr) ||
4880 [ + + ]: 1630488 : IsA(node, CoerceToDomain) ||
24 amitlan@postgresql.o 4881 [ + + ]:GNC 1626254 : IsA(node, NextValueExpr) ||
4882 [ + + ]: 1626025 : IsA(node, JsonExpr))
4883 : : {
4884 : : /* Treat all these as having cost 1 */
2466 tgl@sss.pgh.pa.us 4885 :CBC 8160 : context->total.per_tuple += cpu_operator_cost;
4886 : : }
6152 4887 [ + + ]: 1624956 : else if (IsA(node, CurrentOfExpr))
4888 : : {
4889 : : /* Report high cost to prevent selection of anything but TID scan */
6017 4890 : 197 : context->total.startup += disable_cost;
4891 : : }
7763 4892 [ - + ]: 1624759 : else if (IsA(node, SubLink))
4893 : : {
4894 : : /* This routine should not be applied to un-planned expressions */
7569 tgl@sss.pgh.pa.us 4895 [ # # ]:UBC 0 : elog(ERROR, "cannot handle unplanned sub-select");
4896 : : }
7792 tgl@sss.pgh.pa.us 4897 [ + + ]:CBC 1624759 : else if (IsA(node, SubPlan))
4898 : : {
4899 : : /*
4900 : : * A subplan node in an expression typically indicates that the
4901 : : * subplan will be executed on each evaluation, so charge accordingly.
4902 : : * (Sub-selects that can be executed as InitPlans have already been
4903 : : * removed from the expression.)
4904 : : */
7559 bruce@momjian.us 4905 : 17730 : SubPlan *subplan = (SubPlan *) node;
4906 : :
5714 tgl@sss.pgh.pa.us 4907 : 17730 : context->total.startup += subplan->startup_cost;
4908 : 17730 : context->total.per_tuple += subplan->per_call_cost;
4909 : :
4910 : : /*
4911 : : * We don't want to recurse into the testexpr, because it was already
4912 : : * counted in the SubPlan node's costs. So we're done.
4913 : : */
4914 : 17730 : return false;
4915 : : }
4916 [ + + ]: 1607029 : else if (IsA(node, AlternativeSubPlan))
4917 : : {
4918 : : /*
4919 : : * Arbitrarily use the first alternative plan for costing. (We should
4920 : : * certainly only include one alternative, and we don't yet have
4921 : : * enough information to know which one the executor is most likely to
4922 : : * use.)
4923 : : */
4924 : 810 : AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;
4925 : :
4926 : 810 : return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
4927 : : context);
4928 : : }
2978 4929 [ + + ]: 1606219 : else if (IsA(node, PlaceHolderVar))
4930 : : {
4931 : : /*
4932 : : * A PlaceHolderVar should be given cost zero when considering general
4933 : : * expression evaluation costs. The expense of doing the contained
4934 : : * expression is charged as part of the tlist eval costs of the scan
4935 : : * or join where the PHV is first computed (see set_rel_width and
4936 : : * add_placeholders_to_joinrel). If we charged it again here, we'd be
4937 : : * double-counting the cost for each level of plan that the PHV
4938 : : * bubbles up through. Hence, return without recursing into the
4939 : : * phexpr.
4940 : : */
4941 : 1257 : return false;
4942 : : }
4943 : :
4944 : : /* recurse into children */
8825 4945 : 2079259 : return expression_tree_walker(node, cost_qual_eval_walker,
4946 : : (void *) context);
4947 : : }
4948 : :
4949 : : /*
4950 : : * get_restriction_qual_cost
4951 : : * Compute evaluation costs of a baserel's restriction quals, plus any
4952 : : * movable join quals that have been pushed down to the scan.
4953 : : * Results are returned into *qpqual_cost.
4954 : : *
4955 : : * This is a convenience subroutine that works for seqscans and other cases
4956 : : * where all the given quals will be evaluated the hard way. It's not useful
4957 : : * for cost_index(), for example, where the index machinery takes care of
4958 : : * some of the quals. We assume baserestrictcost was previously set by
4959 : : * set_baserel_size_estimates().
4960 : : */
4961 : : static void
4378 4962 : 441245 : get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
4963 : : ParamPathInfo *param_info,
4964 : : QualCost *qpqual_cost)
4965 : : {
4966 [ + + ]: 441245 : if (param_info)
4967 : : {
4968 : : /* Include costs of pushed-down clauses */
4969 : 92612 : cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
4970 : :
4971 : 92612 : qpqual_cost->startup += baserel->baserestrictcost.startup;
4972 : 92612 : qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
4973 : : }
4974 : : else
4975 : 348633 : *qpqual_cost = baserel->baserestrictcost;
4976 : 441245 : }
4977 : :
4978 : :
4979 : : /*
4980 : : * compute_semi_anti_join_factors
4981 : : * Estimate how much of the inner input a SEMI, ANTI, or inner_unique join
4982 : : * can be expected to scan.
4983 : : *
4984 : : * In a hash or nestloop SEMI/ANTI join, the executor will stop scanning
4985 : : * inner rows as soon as it finds a match to the current outer row.
4986 : : * The same happens if we have detected the inner rel is unique.
4987 : : * We should therefore adjust some of the cost components for this effect.
4988 : : * This function computes some estimates needed for these adjustments.
4989 : : * These estimates will be the same regardless of the particular paths used
4990 : : * for the outer and inner relation, so we compute these once and then pass
4991 : : * them to all the join cost estimation functions.
4992 : : *
4993 : : * Input parameters:
4994 : : * joinrel: join relation under consideration
4995 : : * outerrel: outer relation under consideration
4996 : : * innerrel: inner relation under consideration
4997 : : * jointype: if not JOIN_SEMI or JOIN_ANTI, we assume it's inner_unique
4998 : : * sjinfo: SpecialJoinInfo relevant to this join
4999 : : * restrictlist: join quals
5000 : : * Output parameters:
5001 : : * *semifactors is filled in (see pathnodes.h for field definitions)
5002 : : */
5003 : : void
4461 5004 : 89511 : compute_semi_anti_join_factors(PlannerInfo *root,
5005 : : RelOptInfo *joinrel,
5006 : : RelOptInfo *outerrel,
5007 : : RelOptInfo *innerrel,
5008 : : JoinType jointype,
5009 : : SpecialJoinInfo *sjinfo,
5010 : : List *restrictlist,
5011 : : SemiAntiJoinFactors *semifactors)
5012 : : {
5013 : : Selectivity jselec;
5014 : : Selectivity nselec;
5015 : : Selectivity avgmatch;
5016 : : SpecialJoinInfo norm_sjinfo;
5017 : : List *joinquals;
5018 : : ListCell *l;
5019 : :
5020 : : /*
5021 : : * In an ANTI join, we must ignore clauses that are "pushed down", since
5022 : : * those won't affect the match logic. In a SEMI join, we do not
5023 : : * distinguish joinquals from "pushed down" quals, so just use the whole
5024 : : * restrictinfo list. For other outer join types, we should consider only
5025 : : * non-pushed-down quals, so that this devolves to an IS_OUTER_JOIN check.
5026 : : */
2564 5027 [ + + ]: 89511 : if (IS_OUTER_JOIN(jointype))
5028 : : {
5454 5029 : 38523 : joinquals = NIL;
4461 5030 [ + - + + : 82800 : foreach(l, restrictlist)
+ + ]
5031 : : {
2561 5032 : 44277 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5033 : :
2186 5034 [ + + + - ]: 44277 : if (!RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
5454 5035 : 42319 : joinquals = lappend(joinquals, rinfo);
5036 : : }
5037 : : }
5038 : : else
4461 5039 : 50988 : joinquals = restrictlist;
5040 : :
5041 : : /*
5042 : : * Get the JOIN_SEMI or JOIN_ANTI selectivity of the join clauses.
5043 : : */
5454 5044 [ + + ]: 89511 : jselec = clauselist_selectivity(root,
5045 : : joinquals,
5046 : : 0,
5047 : : (jointype == JOIN_ANTI) ? JOIN_ANTI : JOIN_SEMI,
5048 : : sjinfo);
5049 : :
5050 : : /*
5051 : : * Also get the normal inner-join selectivity of the join clauses.
5052 : : */
20 amitlan@postgresql.o 5053 :GNC 89511 : init_dummy_sjinfo(&norm_sjinfo, outerrel->relids, innerrel->relids);
5054 : :
5454 tgl@sss.pgh.pa.us 5055 :CBC 89511 : nselec = clauselist_selectivity(root,
5056 : : joinquals,
5057 : : 0,
5058 : : JOIN_INNER,
5059 : : &norm_sjinfo);
5060 : :
5061 : : /* Avoid leaking a lot of ListCells */
2564 5062 [ + + ]: 89511 : if (IS_OUTER_JOIN(jointype))
5454 5063 : 38523 : list_free(joinquals);
5064 : :
5065 : : /*
5066 : : * jselec can be interpreted as the fraction of outer-rel rows that have
5067 : : * any matches (this is true for both SEMI and ANTI cases). And nselec is
5068 : : * the fraction of the Cartesian product that matches. So, the average
5069 : : * number of matches for each outer-rel row that has at least one match is
5070 : : * nselec * inner_rows / jselec.
5071 : : *
5072 : : * Note: it is correct to use the inner rel's "rows" count here, even
5073 : : * though we might later be considering a parameterized inner path with
5074 : : * fewer rows. This is because we have included all the join clauses in
5075 : : * the selectivity estimate.
5076 : : */
5077 [ + + ]: 89511 : if (jselec > 0) /* protect against zero divide */
5078 : : {
4461 5079 : 89361 : avgmatch = nselec * innerrel->rows / jselec;
5080 : : /* Clamp to sane range */
5454 5081 [ + + ]: 89361 : avgmatch = Max(1.0, avgmatch);
5082 : : }
5083 : : else
5084 : 150 : avgmatch = 1.0;
5085 : :
4461 5086 : 89511 : semifactors->outer_match_frac = jselec;
5087 : 89511 : semifactors->match_count = avgmatch;
5088 : 89511 : }
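 : :
 : : /*
 : :  * Worked example (selectivities assumed): if jselec = 0.2 (20% of outer
 : :  * rows have at least one match), nselec = 0.001, and the inner rel has
 : :  * 10000 rows, then avgmatch = 0.001 * 10000 / 0.2 = 50, i.e. each
 : :  * matched outer row is expected to find about 50 inner matches.
 : :  */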
5089 : :
5090 : : /*
5091 : : * has_indexed_join_quals
5092 : : * Check whether all the joinquals of a nestloop join are used as
5093 : : * inner index quals.
5094 : : *
5095 : : * If the inner path of a SEMI/ANTI join is an indexscan (including bitmap
5096 : : * indexscan) that uses all the joinquals as indexquals, we can assume that an
5097 : : * unmatched outer tuple is cheap to process, whereas otherwise it's probably
5098 : : * expensive.
5099 : : */
5100 : : static bool
980 peter@eisentraut.org 5101 : 360685 : has_indexed_join_quals(NestPath *path)
5102 : : {
5103 : 360685 : JoinPath *joinpath = &path->jpath;
4378 tgl@sss.pgh.pa.us 5104 : 360685 : Relids joinrelids = joinpath->path.parent->relids;
5105 : 360685 : Path *innerpath = joinpath->innerjoinpath;
5106 : : List *indexclauses;
5107 : : bool found_one;
5108 : : ListCell *lc;
5109 : :
5110 : : /* If join still has quals to evaluate, it's not fast */
5111 [ + + ]: 360685 : if (joinpath->joinrestrictinfo != NIL)
5112 : 253192 : return false;
5113 : : /* Nor if the inner path isn't parameterized at all */
5114 [ + + ]: 107493 : if (innerpath->param_info == NULL)
5115 : 2382 : return false;
5116 : :
5117 : : /* Find the indexclauses list for the inner scan */
5118 [ + + + ]: 105111 : switch (innerpath->pathtype)
5119 : : {
5120 : 66341 : case T_IndexScan:
5121 : : case T_IndexOnlyScan:
5122 : 66341 : indexclauses = ((IndexPath *) innerpath)->indexclauses;
5123 : 66341 : break;
5124 : 141 : case T_BitmapHeapScan:
5125 : : {
5126 : : /* Accept only a simple bitmap scan, not AND/OR cases */
4326 bruce@momjian.us 5127 : 141 : Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
5128 : :
5129 [ + + ]: 141 : if (IsA(bmqual, IndexPath))
5130 : 117 : indexclauses = ((IndexPath *) bmqual)->indexclauses;
5131 : : else
5132 : 24 : return false;
5133 : 117 : break;
5134 : : }
4378 tgl@sss.pgh.pa.us 5135 : 38629 : default:
5136 : :
5137 : : /*
5138 : : * If it's not a simple indexscan, it probably doesn't run quickly
5139 : : * for zero rows out, even if it's a parameterized path using all
5140 : : * the joinquals.
5141 : : */
4461 5142 : 38629 : return false;
5143 : : }
5144 : :
5145 : : /*
5146 : : * Examine the inner path's param clauses. Any that are from the outer
5147 : : * path must be found in the indexclauses list, either exactly or in an
5148 : : * equivalent form generated by equivclass.c. Also, we must find at least
5149 : : * one such clause, else it's a clauseless join which isn't fast.
5150 : : */
4378 5151 : 66458 : found_one = false;
5152 [ + - + + : 132584 : foreach(lc, innerpath->param_info->ppi_clauses)
+ + ]
5153 : : {
5154 : 67604 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
5155 : :
5156 [ + + ]: 67604 : if (join_clause_is_movable_into(rinfo,
5157 : 67604 : innerpath->parent->relids,
5158 : : joinrelids))
5159 : : {
1891 5160 [ + + ]: 67370 : if (!is_redundant_with_indexclauses(rinfo, indexclauses))
4378 5161 : 1478 : return false;
5162 : 65892 : found_one = true;
5163 : : }
5164 : : }
5165 : 64980 : return found_one;
5166 : : }
5167 : :
5168 : :
5169 : : /*
5170 : : * approx_tuple_count
5171 : : * Quick-and-dirty estimation of the number of join rows passing
5172 : : * a set of qual conditions.
5173 : : *
5174 : : * The quals can be either an implicitly-ANDed list of boolean expressions,
5175 : : * or a list of RestrictInfo nodes (typically the latter).
5176 : : *
5177 : : * We intentionally compute the selectivity under JOIN_INNER rules, even
5178 : : * if it's some type of outer join. This is appropriate because we are
5179 : : * trying to figure out how many tuples pass the initial merge or hash
5180 : : * join step.
5181 : : *
5182 : : * This is quick-and-dirty because we bypass clauselist_selectivity, and
5183 : : * simply multiply the independent clause selectivities together. Now
5184 : : * clauselist_selectivity often can't do any better than that anyhow, but
5185 : : * for some situations (such as range constraints) it is smarter. However,
5186 : : * we can't effectively cache the results of clauselist_selectivity, whereas
5187 : : * the individual clause selectivities can be and are cached.
5188 : : *
5189 : : * Since we are only using the results to estimate how many potential
5190 : : * output tuples are generated and passed through qpqual checking, it
5191 : : * seems OK to live with the approximation.
5192 : : */
5193 : : static double
5546 5194 : 192531 : approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
5195 : : {
5196 : : double tuples;
4461 5197 : 192531 : double outer_tuples = path->outerjoinpath->rows;
5198 : 192531 : double inner_tuples = path->innerjoinpath->rows;
5199 : : SpecialJoinInfo sjinfo;
5720 5200 : 192531 : Selectivity selec = 1.0;
5201 : : ListCell *l;
5202 : :
5203 : : /*
5204 : : * Make up a SpecialJoinInfo for JOIN_INNER semantics.
5205 : : */
20 amitlan@postgresql.o 5206 :GNC 192531 : init_dummy_sjinfo(&sjinfo, path->outerjoinpath->parent->relids,
5207 : 192531 : path->innerjoinpath->parent->relids);
5208 : :
5209 : : /* Get the approximate selectivity */
8349 tgl@sss.pgh.pa.us 5210 [ + + + + :CBC 412394 : foreach(l, quals)
+ + ]
5211 : : {
5212 : 219863 : Node *qual = (Node *) lfirst(l);
5213 : :
5214 : : /* Note that clause_selectivity will be able to cache its result */
2565 simon@2ndQuadrant.co 5215 : 219863 : selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
5216 : : }
5217 : :
5218 : : /* Apply it to the input relation sizes */
5546 tgl@sss.pgh.pa.us 5219 : 192531 : tuples = selec * outer_tuples * inner_tuples;
5220 : :
5720 5221 : 192531 : return clamp_row_est(tuples);
5222 : : }
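A worked instance of the arithmetic above, using invented numbers; clamp_rows() loosely mimics clamp_row_est(). Two quals with cached selectivities 0.1 and 0.02 multiply to 0.002 under the independence assumption, so 1000 outer rows times 500 inner rows yield an estimate of 1000 join rows:

#include <math.h>
#include <stdio.h>

static double
clamp_rows(double nrows)            /* loosely mimics clamp_row_est() */
{
    return (nrows < 1.0) ? 1.0 : rint(nrows);
}

int
main(void)
{
    double      sel[] = {0.1, 0.02};    /* invented per-clause selectivities */
    double      selec = 1.0;
    double      outer_tuples = 1000.0;
    double      inner_tuples = 500.0;

    for (int i = 0; i < 2; i++)
        selec *= sel[i];            /* independence assumption */

    /* 0.002 * 1000 * 500 = 1000 estimated join rows */
    printf("%.0f\n", clamp_rows(selec * outer_tuples * inner_tuples));
    return 0;
}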
5223 : :
5224 : :
5225 : : /*
5226 : : * set_baserel_size_estimates
5227 : : * Set the size estimates for the given base relation.
5228 : : *
5229 : : * The rel's targetlist and restrictinfo list must have been constructed
5230 : : * already, and rel->tuples must be set.
5231 : : *
5232 : : * We set the following fields of the rel node:
5233 : : * rows: the estimated number of output tuples (after applying
5234 : : * restriction clauses).
5235 : : * width: the estimated average output tuple width in bytes.
5236 : : * baserestrictcost: estimated cost of evaluating baserestrictinfo clauses.
5237 : : */
5238 : : void
6888 5239 : 213501 : set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5240 : : {
5241 : : double nrows;
5242 : :
5243 : : /* Should only be applied to base relations */
7736 5244 [ - + ]: 213501 : Assert(rel->relid > 0);
5245 : :
7405 5246 : 426990 : nrows = rel->tuples *
7406 5247 : 213501 : clauselist_selectivity(root,
5248 : : rel->baserestrictinfo,
5249 : : 0,
5250 : : JOIN_INNER,
5251 : : NULL);
5252 : :
7405 5253 : 213489 : rel->rows = clamp_row_est(nrows);
5254 : :
6261 5255 : 213489 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
5256 : :
8862 5257 : 213489 : set_rel_width(root, rel);
10141 scrappy@hub.org 5258 : 213489 : }
5259 : :
5260 : : /*
5261 : : * get_parameterized_baserel_size
5262 : : * Make a size estimate for a parameterized scan of a base relation.
5263 : : *
5264 : : * 'param_clauses' lists the additional join clauses to be used.
5265 : : *
5266 : : * set_baserel_size_estimates must have been applied already.
5267 : : */
5268 : : double
4378 tgl@sss.pgh.pa.us 5269 : 61457 : get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
5270 : : List *param_clauses)
5271 : : {
5272 : : List *allclauses;
5273 : : double nrows;
5274 : :
5275 : : /*
5276 : : * Estimate the number of rows returned by the parameterized scan, knowing
5277 : : * that it will apply all the extra join clauses as well as the rel's own
5278 : : * restriction clauses. Note that we force the clauses to be treated as
5279 : : * non-join clauses during selectivity estimation.
5280 : : */
1707 5281 : 61457 : allclauses = list_concat_copy(param_clauses, rel->baserestrictinfo);
4378 5282 : 122914 : nrows = rel->tuples *
5283 : 61457 : clauselist_selectivity(root,
5284 : : allclauses,
2489 5285 : 61457 : rel->relid, /* do not use 0! */
5286 : : JOIN_INNER,
5287 : : NULL);
4378 5288 : 61457 : nrows = clamp_row_est(nrows);
5289 : : /* For safety, make sure result is not more than the base estimate */
5290 [ - + ]: 61457 : if (nrows > rel->rows)
4378 tgl@sss.pgh.pa.us 5291 :UBC 0 : nrows = rel->rows;
4378 tgl@sss.pgh.pa.us 5292 :CBC 61457 : return nrows;
5293 : : }
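A minimal sketch of the same flow, treating the base-restriction and join-clause selectivities as independent for illustration (the real code hands the concatenated clause list to clauselist_selectivity in a single call): the extra clauses can only shrink the relation, so the result is clamped to the unparameterized estimate.

/* Sketch only; the selectivities would really come from clauselist_selectivity. */
static double
parameterized_scan_rows(double tuples,      /* raw table size */
                        double base_sel,    /* baserestrictinfo selectivity */
                        double param_sel,   /* extra join-clause selectivity */
                        double base_rows)   /* unparameterized row estimate */
{
    double      nrows = tuples * base_sel * param_sel;

    if (nrows > base_rows)          /* safety clamp, as in the code above */
        nrows = base_rows;
    return (nrows < 1.0) ? 1.0 : nrows;
}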
5294 : :
5295 : : /*
5296 : : * set_joinrel_size_estimates
5297 : : * Set the size estimates for the given join relation.
5298 : : *
5299 : : * The rel's targetlist must have been constructed already, and a
5300 : : * restriction clause list that matches the given component rels must
5301 : : * be provided.
5302 : : *
5303 : : * Since there is more than one way to make a joinrel for more than two
5304 : : * base relations, the results we get here could depend on which component
5305 : : * rel pair is provided. In theory we should get the same answers no matter
5306 : : * which pair is provided; in practice, since the selectivity estimation
5307 : : * routines don't handle all cases equally well, we might not. But there's
5308 : : * not much to be done about it. (Would it make sense to repeat the
5309 : : * calculations for each pair of input rels that's encountered, and somehow
5310 : : * average the results? Probably way more trouble than it's worth, and
5311 : : * anyway we must keep the rowcount estimate the same for all paths for the
5312 : : * joinrel.)
5313 : : *
5314 : : * We set only the rows field here. The reltarget field was already set by
5315 : : * build_joinrel_tlist, and baserestrictcost is not used for join rels.
5316 : : */
5317 : : void
6888 5318 : 90893 : set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
5319 : : RelOptInfo *outer_rel,
5320 : : RelOptInfo *inner_rel,
5321 : : SpecialJoinInfo *sjinfo,
5322 : : List *restrictlist)
5323 : : {
4461 5324 : 90893 : rel->rows = calc_joinrel_size_estimate(root,
5325 : : rel,
5326 : : outer_rel,
5327 : : inner_rel,
5328 : : outer_rel->rows,
5329 : : inner_rel->rows,
5330 : : sjinfo,
5331 : : restrictlist);
5332 : 90893 : }
5333 : :
5334 : : /*
5335 : : * get_parameterized_joinrel_size
5336 : : * Make a size estimate for a parameterized scan of a join relation.
5337 : : *
5338 : : * 'rel' is the joinrel under consideration.
5339 : : * 'outer_path', 'inner_path' are (probably also parameterized) Paths that
5340 : : * produce the relations being joined.
5341 : : * 'sjinfo' is any SpecialJoinInfo relevant to this join.
5342 : : * 'restrict_clauses' lists the join clauses that need to be applied at the
5343 : : * join node (including any movable clauses that were moved down to this join,
5344 : : * and not including any movable clauses that were pushed down into the
5345 : : * child paths).
5346 : : *
5347 : : * set_joinrel_size_estimates must have been applied already.
5348 : : */
5349 : : double
4378 5350 : 3640 : get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
5351 : : Path *outer_path,
5352 : : Path *inner_path,
5353 : : SpecialJoinInfo *sjinfo,
5354 : : List *restrict_clauses)
5355 : : {
5356 : : double nrows;
5357 : :
5358 : : /*
5359 : : * Estimate the number of rows returned by the parameterized join as the
5360 : : * sizes of the input paths times the selectivity of the clauses that have
5361 : : * ended up at this join node.
5362 : : *
5363 : : * As with set_joinrel_size_estimates, the rowcount estimate could depend
5364 : : * on the pair of input paths provided, though ideally we'd get the same
5365 : : * estimate for any pair with the same parameterization.
5366 : : */
5367 : 3640 : nrows = calc_joinrel_size_estimate(root,
5368 : : rel,
5369 : : outer_path->parent,
5370 : : inner_path->parent,
5371 : : outer_path->rows,
5372 : : inner_path->rows,
5373 : : sjinfo,
5374 : : restrict_clauses);
5375 : : /* For safety, make sure result is not more than the base estimate */
5376 [ + + ]: 3640 : if (nrows > rel->rows)
5377 : 6 : nrows = rel->rows;
5378 : 3640 : return nrows;
5379 : : }
5380 : :
5381 : : /*
5382 : : * calc_joinrel_size_estimate
5383 : : * Workhorse for set_joinrel_size_estimates and
5384 : : * get_parameterized_joinrel_size.
5385 : : *
5386 : : * outer_rel/inner_rel are the relations being joined, but they should be
5387 : : * assumed to have sizes outer_rows/inner_rows; those numbers might be less
5388 : : * than what rel->rows says, when we are considering parameterized paths.
5389 : : */
5390 : : static double
4461 5391 : 94533 : calc_joinrel_size_estimate(PlannerInfo *root,
5392 : : RelOptInfo *joinrel,
5393 : : RelOptInfo *outer_rel,
5394 : : RelOptInfo *inner_rel,
5395 : : double outer_rows,
5396 : : double inner_rows,
5397 : : SpecialJoinInfo *sjinfo,
5398 : : List *restrictlist)
5399 : : {
5722 5400 : 94533 : JoinType jointype = sjinfo->jointype;
5401 : : Selectivity fkselec;
5402 : : Selectivity jselec;
5403 : : Selectivity pselec;
5404 : : double nrows;
5405 : :
5406 : : /*
5407 : : * Compute joinclause selectivity. Note that we are only considering
5408 : : * clauses that become restriction clauses at this join level; we are not
5409 : : * double-counting them because they were not considered in estimating the
5410 : : * sizes of the component rels.
5411 : : *
5412 : : * First, see whether any of the joinclauses can be matched to known FK
5413 : : * constraints. If so, drop those clauses from the restrictlist, and
5414 : : * instead estimate their selectivity using FK semantics. (We do this
5415 : : * without regard to whether said clauses are local or "pushed down".
5416 : : * Probably, an FK-matching clause could never be seen as pushed down at
5417 : : * an outer join, since it would be strict and hence would be grounds for
5418 : : * join strength reduction.) fkselec gets the net selectivity for
5419 : : * FK-matching clauses, or 1.0 if there are none.
5420 : : */
2857 5421 : 94533 : fkselec = get_foreign_key_join_selectivity(root,
5422 : : outer_rel->relids,
5423 : : inner_rel->relids,
5424 : : sjinfo,
5425 : : &restrictlist);
5426 : :
5427 : : /*
5428 : : * For an outer join, we have to distinguish the selectivity of the join's
5429 : : * own clauses (JOIN/ON conditions) from any clauses that were "pushed
5430 : : * down". For inner joins we just count them all as joinclauses.
5431 : : */
6365 5432 [ + + ]: 94533 : if (IS_OUTER_JOIN(jointype))
5433 : : {
5434 : 37536 : List *joinquals = NIL;
5435 : 37536 : List *pushedquals = NIL;
5436 : : ListCell *l;
5437 : :
5438 : : /* Grovel through the clauses to separate into two lists */
5439 [ + + + + : 83512 : foreach(l, restrictlist)
+ + ]
5440 : : {
2561 5441 : 45976 : RestrictInfo *rinfo = lfirst_node(RestrictInfo, l);
5442 : :
2186 5443 [ + + + + ]: 45976 : if (RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids))
6365 5444 : 1878 : pushedquals = lappend(pushedquals, rinfo);
5445 : : else
5446 : 44098 : joinquals = lappend(joinquals, rinfo);
5447 : : }
5448 : :
5449 : : /* Get the separate selectivities */
2868 5450 : 37536 : jselec = clauselist_selectivity(root,
5451 : : joinquals,
5452 : : 0,
5453 : : jointype,
5454 : : sjinfo);
6365 5455 : 37536 : pselec = clauselist_selectivity(root,
5456 : : pushedquals,
5457 : : 0,
5458 : : jointype,
5459 : : sjinfo);
5460 : :
5461 : : /* Avoid leaking a lot of ListCells */
5462 : 37536 : list_free(joinquals);
5463 : 37536 : list_free(pushedquals);
5464 : : }
5465 : : else
5466 : : {
2868 5467 : 56997 : jselec = clauselist_selectivity(root,
5468 : : restrictlist,
5469 : : 0,
5470 : : jointype,
5471 : : sjinfo);
6365 5472 : 56997 : pselec = 0.0; /* not used, keep compiler quiet */
5473 : : }
5474 : :
5475 : : /*
5476 : : * Basically, we multiply size of Cartesian product by selectivity.
5477 : : *
5478 : : * If we are doing an outer join, take that into account: the joinqual
5479 : : * selectivity has to be clamped using the knowledge that the output must
5480 : : * be at least as large as the non-nullable input. However, any
5481 : : * pushed-down quals are applied after the outer join, so their
5482 : : * selectivity applies fully.
5483 : : *
5484 : : * For JOIN_SEMI and JOIN_ANTI, the selectivity is defined as the fraction
5485 : : * of LHS rows that have matches, and we apply that straightforwardly.
5486 : : */
8458 5487 [ + + + + : 94533 : switch (jointype)
+ - ]
5488 : : {
5489 : 54630 : case JOIN_INNER:
2857 5490 : 54630 : nrows = outer_rows * inner_rows * fkselec * jselec;
5491 : : /* pselec not used */
8458 5492 : 54630 : break;
5493 : 34497 : case JOIN_LEFT:
2857 5494 : 34497 : nrows = outer_rows * inner_rows * fkselec * jselec;
4461 5495 [ + + ]: 34497 : if (nrows < outer_rows)
5496 : 11831 : nrows = outer_rows;
6365 5497 : 34497 : nrows *= pselec;
8458 5498 : 34497 : break;
5499 : 845 : case JOIN_FULL:
2857 5500 : 845 : nrows = outer_rows * inner_rows * fkselec * jselec;
4461 5501 [ + + ]: 845 : if (nrows < outer_rows)
5502 : 546 : nrows = outer_rows;
5503 [ + + ]: 845 : if (nrows < inner_rows)
5504 : 60 : nrows = inner_rows;
6365 5505 : 845 : nrows *= pselec;
8458 5506 : 845 : break;
5722 5507 : 2367 : case JOIN_SEMI:
2857 5508 : 2367 : nrows = outer_rows * fkselec * jselec;
5509 : : /* pselec not used */
7755 5510 : 2367 : break;
5722 5511 : 2194 : case JOIN_ANTI:
2857 5512 : 2194 : nrows = outer_rows * (1.0 - fkselec * jselec);
5722 5513 : 2194 : nrows *= pselec;
7755 5514 : 2194 : break;
8458 tgl@sss.pgh.pa.us 5515 :UBC 0 : default:
5516 : : /* other values not expected here */
7569 5517 [ # # ]: 0 : elog(ERROR, "unrecognized join type: %d", (int) jointype);
5518 : : nrows = 0; /* keep compiler quiet */
5519 : : break;
5520 : : }
5521 : :
4461 tgl@sss.pgh.pa.us 5522 :CBC 94533 : return clamp_row_est(nrows);
5523 : : }
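To make the outer-join clamping concrete, here is a sketch of just the JOIN_LEFT branch, with invented inputs; jselec stands for the combined fkselec * jselec product. With outer_rows = 100, inner_rows = 1000, and jselec = 0.0005, the raw product is 50, which is raised to the 100 outer rows before the pushed-down-qual selectivity is applied:

/* Sketch of the JOIN_LEFT branch above, with made-up inputs. */
static double
left_join_rows(double outer_rows, double inner_rows,
               double jselec, double pselec)
{
    double      nrows = outer_rows * inner_rows * jselec;

    if (nrows < outer_rows)         /* output >= non-nullable input */
        nrows = outer_rows;
    nrows *= pselec;                /* pushed-down quals filter after the join */
    return nrows;
}

/* left_join_rows(100, 1000, 0.0005, 1.0) == 100, not 50 */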
5524 : :
5525 : : /*
5526 : : * get_foreign_key_join_selectivity
5527 : : * Estimate join selectivity for foreign-key-related clauses.
5528 : : *
5529 : : * Remove any clauses that can be matched to FK constraints from *restrictlist,
5530 : : * and return a substitute estimate of their selectivity. 1.0 is returned
5531 : : * when there are no such clauses.
5532 : : *
5533 : : * The reason for treating such clauses specially is that we can get better
5534 : : * estimates this way than by relying on clauselist_selectivity(), especially
5535 : : * for multi-column FKs where that function's assumption that the clauses are
5536 : : * independent falls down badly. But even with single-column FKs, we may be
5537 : : * able to get a better answer when the pg_statistic stats are missing or out
5538 : : * of date.
5539 : : */
5540 : : static Selectivity
2857 5541 : 94533 : get_foreign_key_join_selectivity(PlannerInfo *root,
5542 : : Relids outer_relids,
5543 : : Relids inner_relids,
5544 : : SpecialJoinInfo *sjinfo,
5545 : : List **restrictlist)
5546 : : {
5547 : 94533 : Selectivity fkselec = 1.0;
5548 : 94533 : JoinType jointype = sjinfo->jointype;
5549 : 94533 : List *worklist = *restrictlist;
5550 : : ListCell *lc;
5551 : :
5552 : : /* Consider each FK constraint that is known to match the query */
5553 [ + + + + : 95474 : foreach(lc, root->fkey_list)
+ + ]
5554 : : {
5555 : 941 : ForeignKeyOptInfo *fkinfo = (ForeignKeyOptInfo *) lfirst(lc);
5556 : : bool ref_is_outer;
5557 : : List *removedlist;
5558 : : ListCell *cell;
5559 : :
5560 : : /*
5561 : : * This FK is not relevant unless it connects a baserel on one side of
5562 : : * this join to a baserel on the other side.
5563 : : */
5564 [ + + + + ]: 1709 : if (bms_is_member(fkinfo->con_relid, outer_relids) &&
5565 : 768 : bms_is_member(fkinfo->ref_relid, inner_relids))
5566 : 684 : ref_is_outer = false;
5567 [ + + + + ]: 421 : else if (bms_is_member(fkinfo->ref_relid, outer_relids) &&
5568 : 164 : bms_is_member(fkinfo->con_relid, inner_relids))
5569 : 65 : ref_is_outer = true;
5570 : : else
5571 : 192 : continue;
5572 : :
5573 : : /*
5574 : : * If we're dealing with a semi/anti join, and the FK's referenced
5575 : : * relation is on the outside, then knowledge of the FK doesn't help
5576 : : * us figure out what we need to know (which is the fraction of outer
5577 : : * rows that have matches). On the other hand, if the referenced rel
5578 : : * is on the inside, then all outer rows must have matches in the
5579 : : * referenced table (ignoring nulls). But any restriction or join
5580 : : * clauses that filter that table will reduce the fraction of matches.
5581 : : * We can account for restriction clauses, but it's too hard to guess
5582 : : * how many table rows would get through a join that's inside the RHS.
5583 : : * Hence, if either case applies, punt and ignore the FK.
5584 : : */
2491 5585 [ + - + + : 749 : if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
+ + ]
5586 [ - + ]: 488 : (ref_is_outer || bms_membership(inner_relids) != BMS_SINGLETON))
5587 : 6 : continue;
5588 : :
5589 : : /*
5590 : : * Modify the restrictlist by removing clauses that match the FK (and
5591 : : * putting them into removedlist instead). It seems unsafe to modify
5592 : : * the originally-passed List structure, so we make a shallow copy the
5593 : : * first time through.
5594 : : */
2857 5595 [ + + ]: 743 : if (worklist == *restrictlist)
5596 : 631 : worklist = list_copy(worklist);
5597 : :
5598 : 743 : removedlist = NIL;
1735 5599 [ + + + + : 1524 : foreach(cell, worklist)
+ + ]
5600 : : {
2857 5601 : 781 : RestrictInfo *rinfo = (RestrictInfo *) lfirst(cell);
5602 : 781 : bool remove_it = false;
5603 : : int i;
5604 : :
5605 : : /* Drop this clause if it matches any column of the FK */
5606 [ + + ]: 974 : for (i = 0; i < fkinfo->nkeys; i++)
5607 : : {
5608 [ + + ]: 959 : if (rinfo->parent_ec)
5609 : : {
5610 : : /*
5611 : : * EC-derived clauses can only match by EC. It is okay to
5612 : : * consider any clause derived from the same EC as
5613 : : * matching the FK: even if equivclass.c chose to generate
5614 : : * a clause equating some other pair of Vars, it could
5615 : : * have generated one equating the FK's Vars. So for
5616 : : * purposes of estimation, we can act as though it did so.
5617 : : *
5618 : : * Note: checking parent_ec is a bit of a cheat because
5619 : : * there are EC-derived clauses that don't have parent_ec
5620 : : * set; but such clauses must compare expressions that
5621 : : * aren't just Vars, so they cannot match the FK anyway.
5622 : : */
5623 [ + + ]: 152 : if (fkinfo->eclass[i] == rinfo->parent_ec)
5624 : : {
5625 : 149 : remove_it = true;
5626 : 149 : break;
5627 : : }
5628 : : }
5629 : : else
5630 : : {
5631 : : /*
5632 : : * Otherwise, see if rinfo was previously matched to FK as
5633 : : * a "loose" clause.
5634 : : */
5635 [ + + ]: 807 : if (list_member_ptr(fkinfo->rinfos[i], rinfo))
5636 : : {
5637 : 617 : remove_it = true;
5638 : 617 : break;
5639 : : }
5640 : : }
5641 : : }
5642 [ + + ]: 781 : if (remove_it)
5643 : : {
1735 5644 : 766 : worklist = foreach_delete_current(worklist, cell);
2857 5645 : 766 : removedlist = lappend(removedlist, rinfo);
5646 : : }
5647 : : }
5648 : :
5649 : : /*
5650 : : * If we failed to remove all the matching clauses we expected to
5651 : : * find, chicken out and ignore this FK; applying its selectivity
5652 : : * might result in double-counting. Put any clauses we did manage to
5653 : : * remove back into the worklist.
5654 : : *
5655 : : * Since the matching clauses are known not outerjoin-delayed, they
5656 : : * would normally have appeared in the initial joinclause list. If we
5657 : : * didn't find them, there are two possibilities:
5658 : : *
5659 : : * 1. If the FK match is based on an EC that is ec_has_const, it won't
5660 : : * have generated any join clauses at all. We discount such ECs while
5661 : : * checking to see if we have "all" the clauses. (Below, we'll adjust
5662 : : * the selectivity estimate for this case.)
5663 : : *
5664 : : * 2. The clauses were matched to some other FK in a previous
5665 : : * iteration of this loop, and thus removed from worklist. (A likely
5666 : : * case is that two FKs are matched to the same EC; there will be only
5667 : : * one EC-derived clause in the initial list, so the first FK will
5668 : : * consume it.) Applying both FKs' selectivity independently risks
5669 : : * underestimating the join size; in particular, this would undo one
5670 : : * of the main things that ECs were invented for, namely to avoid
5671 : : * double-counting the selectivity of redundant equality conditions.
5672 : : * Later we might think of a reasonable way to combine the estimates,
5673 : : * but for now, just punt, since this is a fairly uncommon situation.
5674 : : */
1264 5675 [ + + ]: 743 : if (removedlist == NIL ||
5676 : 600 : list_length(removedlist) !=
5677 [ - + ]: 600 : (fkinfo->nmatched_ec - fkinfo->nconst_ec + fkinfo->nmatched_ri))
5678 : : {
2857 5679 : 143 : worklist = list_concat(worklist, removedlist);
5680 : 143 : continue;
5681 : : }
5682 : :
5683 : : /*
5684 : : * Finally we get to the payoff: estimate selectivity using the
5685 : : * knowledge that each referencing row will match exactly one row in
5686 : : * the referenced table.
5687 : : *
5688 : : * XXX that's not true in the presence of nulls in the referencing
5689 : : * column(s), so in principle we should derate the estimate for those.
5690 : : * However (1) if there are any strict restriction clauses for the
5691 : : * referencing column(s) elsewhere in the query, derating here would
5692 : : * be double-counting the null fraction, and (2) it's not very clear
5693 : : * how to combine null fractions for multiple referencing columns. So
5694 : : * we do nothing for now about correcting for nulls.
5695 : : *
5696 : : * XXX another point here is that if either side of an FK constraint
5697 : : * is an inheritance parent, we estimate as though the constraint
5698 : : * covers all its children as well. This is not an unreasonable
5699 : : * assumption for a referencing table, ie the user probably applied
5700 : : * identical constraints to all child tables (though perhaps we ought
5701 : : * to check that). But it's not possible to have done that for a
5702 : : * referenced table. Fortunately, precisely because that doesn't
5703 : : * work, it is uncommon in practice to have an FK referencing a parent
5704 : : * table. So, at least for now, disregard inheritance here.
5705 : : */
2491 5706 [ + - + + ]: 600 : if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
2857 5707 : 376 : {
5708 : : /*
5709 : : * For JOIN_SEMI and JOIN_ANTI, we only get here when the FK's
5710 : : * referenced table is exactly the inside of the join. The join
5711 : : * selectivity is defined as the fraction of LHS rows that have
5712 : : * matches. The FK implies that every LHS row has a match *in the
5713 : : * referenced table*; but any restriction clauses on it will
5714 : : * reduce the number of matches. Hence we take the join
5715 : : * selectivity as equal to the selectivity of the table's
5716 : : * restriction clauses, which is rows / tuples; but we must guard
5717 : : * against tuples == 0.
5718 : : */
2491 5719 : 376 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5720 [ + + ]: 376 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5721 : :
5722 : 376 : fkselec *= ref_rel->rows / ref_tuples;
5723 : : }
5724 : : else
5725 : : {
5726 : : /*
5727 : : * Otherwise, selectivity is exactly 1/referenced-table-size; but
5728 : : * guard against tuples == 0. Note we should use the raw table
5729 : : * tuple count, not any estimate of its filtered or joined size.
5730 : : */
2857 5731 : 224 : RelOptInfo *ref_rel = find_base_rel(root, fkinfo->ref_relid);
5732 [ + - ]: 224 : double ref_tuples = Max(ref_rel->tuples, 1.0);
5733 : :
5734 : 224 : fkselec *= 1.0 / ref_tuples;
5735 : : }
5736 : :
5737 : : /*
5738 : : * If any of the FK columns participated in ec_has_const ECs, then
5739 : : * equivclass.c will have generated "var = const" restrictions for
5740 : : * each side of the join, thus reducing the sizes of both input
5741 : : * relations. Taking the fkselec at face value would amount to
5742 : : * double-counting the selectivity of the constant restriction for the
5743 : : * referencing Var. Hence, look for the restriction clause(s) that
5744 : : * were applied to the referencing Var(s), and divide out their
5745 : : * selectivity to correct for this.
5746 : : */
1264 5747 [ + + ]: 600 : if (fkinfo->nconst_ec > 0)
5748 : : {
5749 [ + + ]: 12 : for (int i = 0; i < fkinfo->nkeys; i++)
5750 : : {
5751 : 9 : EquivalenceClass *ec = fkinfo->eclass[i];
5752 : :
5753 [ + - + + ]: 9 : if (ec && ec->ec_has_const)
5754 : : {
5755 : 3 : EquivalenceMember *em = fkinfo->fk_eclass_member[i];
5756 : 3 : RestrictInfo *rinfo = find_derived_clause_for_ec_member(ec,
5757 : : em);
5758 : :
5759 [ + - ]: 3 : if (rinfo)
5760 : : {
5761 : : Selectivity s0;
5762 : :
5763 : 3 : s0 = clause_selectivity(root,
5764 : : (Node *) rinfo,
5765 : : 0,
5766 : : jointype,
5767 : : sjinfo);
5768 [ + - ]: 3 : if (s0 > 0)
5769 : 3 : fkselec /= s0;
5770 : : }
5771 : : }
5772 : : }
5773 : : }
5774 : : }
5775 : :
2857 5776 : 94533 : *restrictlist = worklist;
1264 5777 [ - + - + ]: 94533 : CLAMP_PROBABILITY(fkselec);
2857 5778 : 94533 : return fkselec;
5779 : : }
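Numerically, the two payoff cases work out as in this sketch (sizes invented): a plain join against a referenced table of 1,000,000 raw tuples gets selectivity 1/1,000,000, while a semi/anti join whose referenced rel is filtered from 1,000,000 tuples down to 200,000 rows gets 0.2, the fraction of outer rows expected to find a surviving match:

/* Sketch of the FK selectivity payoff; guards against tuples == 0. */
static double
fk_selectivity(int is_semi_or_anti, double ref_rows, double ref_tuples)
{
    if (ref_tuples < 1.0)
        ref_tuples = 1.0;
    return is_semi_or_anti ? (ref_rows / ref_tuples) : (1.0 / ref_tuples);
}

/* fk_selectivity(0, 200000.0, 1e6) == 1e-6  (inner/outer join)   */
/* fk_selectivity(1, 200000.0, 1e6) == 0.2   (semi/anti join)     */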
5780 : :
5781 : : /*
5782 : : * set_subquery_size_estimates
5783 : : * Set the size estimates for a base relation that is a subquery.
5784 : : *
5785 : : * The rel's targetlist and restrictinfo list must have been constructed
5786 : : * already, and the Paths for the subquery must have been completed.
5787 : : * We look at the subquery's PlannerInfo to extract data.
5788 : : *
5789 : : * We set the same fields as set_baserel_size_estimates.
5790 : : */
5791 : : void
4607 5792 : 10599 : set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5793 : : {
5794 : 10599 : PlannerInfo *subroot = rel->subroot;
5795 : : RelOptInfo *sub_final_rel;
5796 : : ListCell *lc;
5797 : :
5798 : : /* Should only be applied to base relations that are subqueries */
4895 5799 [ - + ]: 10599 : Assert(rel->relid > 0);
2397 andrew@dunslane.net 5800 [ + - - + ]: 10599 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_SUBQUERY);
5801 : :
5802 : : /*
5803 : : * Copy raw number of output rows from subquery. All of its paths should
5804 : : * have the same output rowcount, so just look at cheapest-total.
5805 : : */
2960 tgl@sss.pgh.pa.us 5806 : 10599 : sub_final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
5807 : 10599 : rel->tuples = sub_final_rel->cheapest_total_path->rows;
5808 : :
5809 : : /*
5810 : : * Compute per-output-column width estimates by examining the subquery's
5811 : : * targetlist. For any output that is a plain Var, get the width estimate
5812 : : * that was made while planning the subquery. Otherwise, we leave it to
5813 : : * set_rel_width to fill in a datatype-based default estimate.
5814 : : */
4895 5815 [ + + + + : 42384 : foreach(lc, subroot->parse->targetList)
+ + ]
5816 : : {
2561 5817 : 31785 : TargetEntry *te = lfirst_node(TargetEntry, lc);
4895 5818 : 31785 : Node *texpr = (Node *) te->expr;
4618 5819 : 31785 : int32 item_width = 0;
5820 : :
5821 : : /* junk columns aren't visible to upper query */
4895 5822 [ + + ]: 31785 : if (te->resjunk)
5823 : 562 : continue;
5824 : :
5825 : : /*
5826 : : * The subquery could be an expansion of a view that's had columns
5827 : : * added to it since the current query was parsed, so that there are
5828 : : * non-junk tlist columns in it that don't correspond to any column
5829 : : * visible at our query level. Ignore such columns.
5830 : : */
4032 5831 [ + - - + ]: 31223 : if (te->resno < rel->min_attr || te->resno > rel->max_attr)
4032 tgl@sss.pgh.pa.us 5832 :UBC 0 : continue;
5833 : :
5834 : : /*
5835 : : * XXX This currently doesn't work for subqueries containing set
5836 : : * operations, because the Vars in their tlists are bogus references
5837 : : * to the first leaf subquery, which wouldn't give the right answer
5838 : : * even if we could still get to its PlannerInfo.
5839 : : *
5840 : : * Also, the subquery could be an appendrel for which all branches are
5841 : : * known empty due to constraint exclusion, in which case
5842 : : * set_append_rel_pathlist will have left the attr_widths set to zero.
5843 : : *
5844 : : * In either case, we just leave the width estimate zero until
5845 : : * set_rel_width fixes it.
5846 : : */
4895 tgl@sss.pgh.pa.us 5847 [ + + ]:CBC 31223 : if (IsA(texpr, Var) &&
5848 [ + + ]: 14088 : subroot->parse->setOperations == NULL)
5849 : : {
4753 bruce@momjian.us 5850 : 13393 : Var *var = (Var *) texpr;
4895 tgl@sss.pgh.pa.us 5851 : 13393 : RelOptInfo *subrel = find_base_rel(subroot, var->varno);
5852 : :
5853 : 13393 : item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
5854 : : }
5855 : 31223 : rel->attr_widths[te->resno - rel->min_attr] = item_width;
5856 : : }
5857 : :
5858 : : /* Now estimate number of output rows, etc */
5859 : 10599 : set_baserel_size_estimates(root, rel);
5860 : 10599 : }
5861 : :
5862 : : /*
5863 : : * set_function_size_estimates
5864 : : * Set the size estimates for a base relation that is a function call.
5865 : : *
5866 : : * The rel's targetlist and restrictinfo list must have been constructed
5867 : : * already.
5868 : : *
5869 : : * We set the same fields as set_baserel_size_estimates.
5870 : : */
5871 : : void
6888 5872 : 21525 : set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5873 : : {
5874 : : RangeTblEntry *rte;
5875 : : ListCell *lc;
5876 : :
5877 : : /* Should only be applied to base relations that are functions */
7736 5878 [ - + ]: 21525 : Assert(rel->relid > 0);
6203 5879 [ + - ]: 21525 : rte = planner_rt_fetch(rel->relid, root);
6766 5880 [ - + ]: 21525 : Assert(rte->rtekind == RTE_FUNCTION);
5881 : :
5882 : : /*
5883 : : * Estimate number of rows the functions will return. The rowcount of the
5884 : : * node is that of the largest function result.
5885 : : */
3797 5886 : 21525 : rel->tuples = 0;
5887 [ + - + + : 43206 : foreach(lc, rte->functions)
+ + ]
5888 : : {
5889 : 21681 : RangeTblFunction *rtfunc = (RangeTblFunction *) lfirst(lc);
1891 5890 : 21681 : double ntup = expression_returns_set_rows(root, rtfunc->funcexpr);
5891 : :
3797 5892 [ + + ]: 21681 : if (ntup > rel->tuples)
5893 : 21537 : rel->tuples = ntup;
5894 : : }
5895 : :
5896 : : /* Now estimate number of output rows, etc */
7405 5897 : 21525 : set_baserel_size_estimates(root, rel);
8008 5898 : 21525 : }
5899 : :
5900 : : /*
5901 : : * set_tablefunc_size_estimates
5902 : : * Set the size estimates for a base relation that is a tablefunc call.
5903 : : *
5904 : : * The rel's targetlist and restrictinfo list must have been constructed
5905 : : * already.
5906 : : *
5907 : : * We set the same fields as set_baserel_size_estimates.
5908 : : */
5909 : : void
2594 alvherre@alvh.no-ip. 5910 : 254 : set_tablefunc_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5911 : : {
5912 : : /* Should only be applied to base relations that are functions */
5913 [ - + ]: 254 : Assert(rel->relid > 0);
2397 andrew@dunslane.net 5914 [ + - - + ]: 254 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_TABLEFUNC);
5915 : :
2594 alvherre@alvh.no-ip. 5916 : 254 : rel->tuples = 100;
5917 : :
5918 : : /* Now estimate number of output rows, etc */
5919 : 254 : set_baserel_size_estimates(root, rel);
5920 : 254 : }
5921 : :
5922 : : /*
5923 : : * set_values_size_estimates
5924 : : * Set the size estimates for a base relation that is a values list.
5925 : : *
5926 : : * The rel's targetlist and restrictinfo list must have been constructed
5927 : : * already.
5928 : : *
5929 : : * We set the same fields as set_baserel_size_estimates.
5930 : : */
5931 : : void
6465 mail@joeconway.com 5932 : 3858 : set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
5933 : : {
5934 : : RangeTblEntry *rte;
5935 : :
5936 : : /* Should only be applied to base relations that are values lists */
5937 [ - + ]: 3858 : Assert(rel->relid > 0);
6203 tgl@sss.pgh.pa.us 5938 [ + - ]: 3858 : rte = planner_rt_fetch(rel->relid, root);
6465 mail@joeconway.com 5939 [ - + ]: 3858 : Assert(rte->rtekind == RTE_VALUES);
5940 : :
5941 : : /*
5942 : : * Estimate number of rows the values list will return. We know this
5943 : : * precisely based on the list length (well, barring set-returning
5944 : : * functions in list items, but that's a refinement not catered for
5945 : : * anywhere else either).
5946 : : */
5947 : 3858 : rel->tuples = list_length(rte->values_lists);
5948 : :
5949 : : /* Now estimate number of output rows, etc */
5950 : 3858 : set_baserel_size_estimates(root, rel);
5951 : 3858 : }
5952 : :
5953 : : /*
5954 : : * set_cte_size_estimates
5955 : : * Set the size estimates for a base relation that is a CTE reference.
5956 : : *
5957 : : * The rel's targetlist and restrictinfo list must have been constructed
5958 : : * already, and we need an estimate of the number of rows returned by the CTE
5959 : : * (if a regular CTE) or the non-recursive term (if a self-reference).
5960 : : *
5961 : : * We set the same fields as set_baserel_size_estimates.
5962 : : */
5963 : : void
2960 tgl@sss.pgh.pa.us 5964 : 2009 : set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
5965 : : {
5966 : : RangeTblEntry *rte;
5967 : :
5968 : : /* Should only be applied to base relations that are CTE references */
5671 5969 [ - + ]: 2009 : Assert(rel->relid > 0);
5970 [ + - ]: 2009 : rte = planner_rt_fetch(rel->relid, root);
5971 [ - + ]: 2009 : Assert(rte->rtekind == RTE_CTE);
5972 : :
5973 [ + + ]: 2009 : if (rte->self_reference)
5974 : : {
5975 : : /*
5976 : : * In a self-reference, we assume the average worktable size is a
5977 : : * multiple of the nonrecursive term's size. The best multiplier will
5978 : : * vary depending on query "fan-out", so make its value adjustable.
5979 : : */
752 5980 : 406 : rel->tuples = clamp_row_est(recursive_worktable_factor * cte_rows);
5981 : : }
5982 : : else
5983 : : {
5984 : : /* Otherwise just believe the CTE's rowcount estimate */
2960 5985 : 1603 : rel->tuples = cte_rows;
5986 : : }
5987 : :
5988 : : /* Now estimate number of output rows, etc */
5671 5989 : 2009 : set_baserel_size_estimates(root, rel);
5990 : 2009 : }
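For the self-reference case, with the default recursive_worktable_factor of 10.0, a nonrecursive term estimated at 100 rows yields an average-worktable estimate of 1000 rows. A sketch of just that computation:

/* Sketch: worktable size estimate for a recursive CTE self-reference. */
static double
worktable_rows(double nonrecursive_rows, double worktable_factor)
{
    double      nrows = worktable_factor * nonrecursive_rows;

    return (nrows < 1.0) ? 1.0 : nrows;     /* clamp_row_est() analogue */
}

/* worktable_rows(100.0, 10.0) == 1000.0 with the default factor */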
5991 : :
5992 : : /*
5993 : : * set_namedtuplestore_size_estimates
5994 : : * Set the size estimates for a base relation that is a tuplestore reference.
5995 : : *
5996 : : * The rel's targetlist and restrictinfo list must have been constructed
5997 : : * already.
5998 : : *
5999 : : * We set the same fields as set_baserel_size_estimates.
6000 : : */
6001 : : void
2571 kgrittn@postgresql.o 6002 : 223 : set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6003 : : {
6004 : : RangeTblEntry *rte;
6005 : :
6006 : : /* Should only be applied to base relations that are tuplestore references */
6007 [ - + ]: 223 : Assert(rel->relid > 0);
6008 [ + - ]: 223 : rte = planner_rt_fetch(rel->relid, root);
6009 [ - + ]: 223 : Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
6010 : :
6011 : : /*
6012 : : * Use the estimate provided by the code which is generating the named
6013 : : * tuplestore. In some cases, the actual number might be available; in
6014 : : * others the same plan will be re-used, so a "typical" value might be
6015 : : * estimated and used.
6016 : : */
6017 : 223 : rel->tuples = rte->enrtuples;
6018 [ - + ]: 223 : if (rel->tuples < 0)
2571 kgrittn@postgresql.o 6019 :UBC 0 : rel->tuples = 1000;
6020 : :
6021 : : /* Now estimate number of output rows, etc */
2571 kgrittn@postgresql.o 6022 :CBC 223 : set_baserel_size_estimates(root, rel);
6023 : 223 : }
6024 : :
6025 : : /*
6026 : : * set_result_size_estimates
6027 : : * Set the size estimates for an RTE_RESULT base relation
6028 : : *
6029 : : * The rel's targetlist and restrictinfo list must have been constructed
6030 : : * already.
6031 : : *
6032 : : * We set the same fields as set_baserel_size_estimates.
6033 : : */
6034 : : void
1903 tgl@sss.pgh.pa.us 6035 : 781 : set_result_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6036 : : {
6037 : : /* Should only be applied to RTE_RESULT base relations */
6038 [ - + ]: 781 : Assert(rel->relid > 0);
6039 [ + - - + ]: 781 : Assert(planner_rt_fetch(rel->relid, root)->rtekind == RTE_RESULT);
6040 : :
6041 : : /* RTE_RESULT always generates a single row, natively */
6042 : 781 : rel->tuples = 1;
6043 : :
6044 : : /* Now estimate number of output rows, etc */
6045 : 781 : set_baserel_size_estimates(root, rel);
6046 : 781 : }
6047 : :
6048 : : /*
6049 : : * set_foreign_size_estimates
6050 : : * Set the size estimates for a base relation that is a foreign table.
6051 : : *
6052 : : * There is not a whole lot that we can do here; the foreign-data wrapper
6053 : : * is responsible for producing useful estimates. We can do a decent job
6054 : : * of estimating baserestrictcost, so we set that, and we also set up width
6055 : : * using what will be purely datatype-driven estimates from the targetlist.
6056 : : * There is no way to do anything sane with the rows value, so we just put
6057 : : * a default estimate and hope that the wrapper can improve on it. The
6058 : : * wrapper's GetForeignRelSize function will be called momentarily.
6059 : : *
6060 : : * The rel's targetlist and restrictinfo list must have been constructed
6061 : : * already.
6062 : : */
6063 : : void
4802 6064 : 1179 : set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
6065 : : {
6066 : : /* Should only be applied to base relations */
6067 [ - + ]: 1179 : Assert(rel->relid > 0);
6068 : :
6069 : 1179 : rel->rows = 1000; /* entirely bogus default estimate */
6070 : :
6071 : 1179 : cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
6072 : :
6073 : 1179 : set_rel_width(root, rel);
6074 : 1179 : }
6075 : :
6076 : :
6077 : : /*
6078 : : * set_rel_width
6079 : : * Set the estimated output width of a base relation.
6080 : : *
6081 : : * The estimated output width is the sum of the per-attribute width estimates
6082 : : * for the actually-referenced columns, plus any PHVs or other expressions
6083 : : * that have to be calculated at this relation. This is the amount of data
6084 : : * we'd need to pass upwards in case of a sort, hash, etc.
6085 : : *
6086 : : * This function also sets reltarget->cost, so it's a bit misnamed now.
6087 : : *
6088 : : * NB: this works best on plain relations because it prefers to look at
6089 : : * real Vars. For subqueries, set_subquery_size_estimates will already have
6090 : : * copied up whatever per-column estimates were made within the subquery,
6091 : : * and for other types of rels there isn't much we can do anyway. We fall
6092 : : * back on (fairly stupid) datatype-based width estimates if we can't get
6093 : : * any better number.
6094 : : *
6095 : : * The per-attribute width estimates are cached for possible re-use while
6096 : : * building join relations or post-scan/join pathtargets.
6097 : : */
6098 : : static void
6888 6099 : 214668 : set_rel_width(PlannerInfo *root, RelOptInfo *rel)
6100 : : {
5658 6101 [ + - ]: 214668 : Oid reloid = planner_rt_fetch(rel->relid, root)->relid;
117 tgl@sss.pgh.pa.us 6102 :GNC 214668 : int64 tuple_width = 0;
4895 tgl@sss.pgh.pa.us 6103 :CBC 214668 : bool have_wholerow_var = false;
6104 : : ListCell *lc;
6105 : :
6106 : : /* Vars are assumed to have cost zero, but other exprs do not */
2953 6107 : 214668 : rel->reltarget->cost.startup = 0;
6108 : 214668 : rel->reltarget->cost.per_tuple = 0;
6109 : :
6110 [ + + + + : 751017 : foreach(lc, rel->reltarget->exprs)
+ + ]
6111 : : {
5654 6112 : 536349 : Node *node = (Node *) lfirst(lc);
6113 : :
6114 : : /*
6115 : : * Ordinarily, a Var in a rel's targetlist must belong to that rel;
6116 : : * but there are corner cases involving LATERAL references where that
6117 : : * isn't so. If the Var has the wrong varno, fall through to the
6118 : : * generic case (it doesn't seem worth the trouble to be any smarter).
6119 : : */
4249 6120 [ + + ]: 536349 : if (IsA(node, Var) &&
6121 [ + + ]: 526613 : ((Var *) node)->varno == rel->relid)
7253 6122 : 139408 : {
5654 6123 : 526580 : Var *var = (Var *) node;
6124 : : int ndx;
6125 : : int32 item_width;
6126 : :
6127 [ - + ]: 526580 : Assert(var->varattno >= rel->min_attr);
6128 [ - + ]: 526580 : Assert(var->varattno <= rel->max_attr);
6129 : :
6130 : 526580 : ndx = var->varattno - rel->min_attr;
6131 : :
6132 : : /*
6133 : : * If it's a whole-row Var, we'll deal with it below after we have
6134 : : * already cached as many attr widths as possible.
6135 : : */
4895 6136 [ + + ]: 526580 : if (var->varattno == 0)
6137 : : {
6138 : 1342 : have_wholerow_var = true;
6139 : 1342 : continue;
6140 : : }
6141 : :
6142 : : /*
6143 : : * The width may have been cached already (especially if it's a
6144 : : * subquery), so don't duplicate effort.
6145 : : */
5654 6146 [ + + ]: 525238 : if (rel->attr_widths[ndx] > 0)
6147 : : {
6148 : 101207 : tuple_width += rel->attr_widths[ndx];
7595 6149 : 101207 : continue;
6150 : : }
6151 : :
6152 : : /* Try to get column width from statistics */
4895 6153 [ + + + + ]: 424031 : if (reloid != InvalidOid && var->varattno > 0)
6154 : : {
5654 6155 : 330477 : item_width = get_attavgwidth(reloid, var->varattno);
6156 [ + + ]: 330477 : if (item_width > 0)
6157 : : {
6158 : 284623 : rel->attr_widths[ndx] = item_width;
6159 : 284623 : tuple_width += item_width;
6160 : 284623 : continue;
6161 : : }
6162 : : }
6163 : :
6164 : : /*
6165 : : * Not a plain relation, or can't find statistics for it. Estimate
6166 : : * using just the type info.
6167 : : */
6168 : 139408 : item_width = get_typavgwidth(var->vartype, var->vartypmod);
6169 [ - + ]: 139408 : Assert(item_width > 0);
6170 : 139408 : rel->attr_widths[ndx] = item_width;
6171 : 139408 : tuple_width += item_width;
6172 : : }
6173 [ + + ]: 9769 : else if (IsA(node, PlaceHolderVar))
6174 : : {
6175 : : /*
6176 : : * We will need to evaluate the PHV's contained expression while
6177 : : * scanning this rel, so be sure to include it in reltarget->cost.
6178 : : */
6179 : 578 : PlaceHolderVar *phv = (PlaceHolderVar *) node;
606 6180 : 578 : PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
6181 : : QualCost cost;
6182 : :
5654 6183 : 578 : tuple_width += phinfo->ph_width;
2978 6184 : 578 : cost_qual_eval_node(&cost, (Node *) phv->phexpr, root);
2953 6185 : 578 : rel->reltarget->cost.startup += cost.startup;
6186 : 578 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6187 : : }
6188 : : else
6189 : : {
6190 : : /*
6191 : : * We could be looking at an expression pulled up from a subquery,
6192 : : * or a ROW() representing a whole-row child Var, etc. Do what we
6193 : : * can using the expression type information.
6194 : : */
6195 : : int32 item_width;
6196 : : QualCost cost;
6197 : :
5391 6198 : 9191 : item_width = get_typavgwidth(exprType(node), exprTypmod(node));
6199 [ - + ]: 9191 : Assert(item_width > 0);
6200 : 9191 : tuple_width += item_width;
6201 : : /* Not entirely clear if we need to account for cost, but do so */
2978 6202 : 9191 : cost_qual_eval_node(&cost, node, root);
2953 6203 : 9191 : rel->reltarget->cost.startup += cost.startup;
6204 : 9191 : rel->reltarget->cost.per_tuple += cost.per_tuple;
6205 : : }
6206 : : }
6207 : :
6208 : : /*
6209 : : * If we have a whole-row reference, estimate its width as the sum of
6210 : : * per-column widths plus heap tuple header overhead.
6211 : : */
4895 6212 [ + + ]: 214668 : if (have_wholerow_var)
6213 : : {
117 tgl@sss.pgh.pa.us 6214 :GNC 1342 : int64 wholerow_width = MAXALIGN(SizeofHeapTupleHeader);
6215 : :
4895 tgl@sss.pgh.pa.us 6216 [ + + ]:CBC 1342 : if (reloid != InvalidOid)
6217 : : {
6218 : : /* Real relation, so estimate true tuple width */
6219 : 1044 : wholerow_width += get_relation_data_width(reloid,
2489 6220 : 1044 : rel->attr_widths - rel->min_attr);
6221 : : }
6222 : : else
6223 : : {
6224 : : /* Do what we can with info for a phony rel */
6225 : : AttrNumber i;
6226 : :
4895 6227 [ + + ]: 766 : for (i = 1; i <= rel->max_attr; i++)
6228 : 468 : wholerow_width += rel->attr_widths[i - rel->min_attr];
6229 : : }
6230 : :
117 tgl@sss.pgh.pa.us 6231 :GNC 1342 : rel->attr_widths[0 - rel->min_attr] = clamp_width_est(wholerow_width);
6232 : :
6233 : : /*
6234 : : * Include the whole-row Var as part of the output tuple. Yes, that
6235 : : * really is what happens at runtime.
6236 : : */
4895 tgl@sss.pgh.pa.us 6237 :CBC 1342 : tuple_width += wholerow_width;
6238 : : }
6239 : :
117 tgl@sss.pgh.pa.us 6240 :GNC 214668 : rel->reltarget->width = clamp_width_est(tuple_width);
10141 scrappy@hub.org 6241 :CBC 214668 : }
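The whole-row width computed above is just aligned tuple-header overhead plus the cached per-column widths. A sketch, where the 24-byte header (MAXALIGN of SizeofHeapTupleHeader on an 8-byte-aligned build) is an assumption, not a value read from the headers:

/* Sketch: width of a whole-row Var = aligned header + column widths. */
static long
wholerow_width(const int *attr_widths, int natts)
{
    long        width = 24;     /* MAXALIGN(SizeofHeapTupleHeader), assumed */

    for (int i = 0; i < natts; i++)
        width += attr_widths[i];
    return width;
}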
6242 : :
6243 : : /*
6244 : : * set_pathtarget_cost_width
6245 : : * Set the estimated eval cost and output width of a PathTarget tlist.
6246 : : *
6247 : : * As a notational convenience, returns the same PathTarget pointer passed in.
6248 : : *
6249 : : * Most, though not quite all, uses of this function occur after we've run
6250 : : * set_rel_width() for base relations; so we can usually obtain cached width
6251 : : * estimates for Vars. If we can't, fall back on datatype-based width
6252 : : * estimates. Present early-planning uses of PathTargets don't need accurate
6253 : : * widths badly enough to justify going to the catalogs for better data.
6254 : : */
6255 : : PathTarget *
2960 tgl@sss.pgh.pa.us 6256 : 279965 : set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
6257 : : {
117 tgl@sss.pgh.pa.us 6258 :GNC 279965 : int64 tuple_width = 0;
6259 : : ListCell *lc;
6260 : :
6261 : : /* Vars are assumed to have cost zero, but other exprs do not */
2960 tgl@sss.pgh.pa.us 6262 :CBC 279965 : target->cost.startup = 0;
6263 : 279965 : target->cost.per_tuple = 0;
6264 : :
6265 [ + + + + : 927473 : foreach(lc, target->exprs)
+ + ]
6266 : : {
6267 : 647508 : Node *node = (Node *) lfirst(lc);
6268 : :
391 drowley@postgresql.o 6269 : 647508 : tuple_width += get_expr_width(root, node);
6270 : :
6271 : : /* For non-Vars, account for evaluation cost */
6272 [ + + ]: 647508 : if (!IsA(node, Var))
6273 : : {
6274 : : QualCost cost;
6275 : :
2960 tgl@sss.pgh.pa.us 6276 : 286780 : cost_qual_eval_node(&cost, node, root);
6277 : 286780 : target->cost.startup += cost.startup;
6278 : 286780 : target->cost.per_tuple += cost.per_tuple;
6279 : : }
6280 : : }
6281 : :
117 tgl@sss.pgh.pa.us 6282 :GNC 279965 : target->width = clamp_width_est(tuple_width);
6283 : :
2960 tgl@sss.pgh.pa.us 6284 :CBC 279965 : return target;
6285 : : }
6286 : :
6287 : : /*
6288 : : * get_expr_width
6289 : : * Estimate the width of the given expr attempting to use the width
6290 : : * cached in a Var's owning RelOptInfo, else fallback on the type's
6291 : : * average width when unable to or when the given Node is not a Var.
6292 : : */
6293 : : static int32
391 drowley@postgresql.o 6294 : 772960 : get_expr_width(PlannerInfo *root, const Node *expr)
6295 : : {
6296 : : int32 width;
6297 : :
6298 [ + + ]: 772960 : if (IsA(expr, Var))
6299 : : {
6300 : 480149 : const Var *var = (const Var *) expr;
6301 : :
6302 : : /* We should not see any upper-level Vars here */
6303 [ - + ]: 480149 : Assert(var->varlevelsup == 0);
6304 : :
6305 : : /* Try to get data from RelOptInfo cache */
6306 [ + + ]: 480149 : if (!IS_SPECIAL_VARNO(var->varno) &&
6307 [ + - ]: 477598 : var->varno < root->simple_rel_array_size)
6308 : : {
6309 : 477598 : RelOptInfo *rel = root->simple_rel_array[var->varno];
6310 : :
6311 [ + + ]: 477598 : if (rel != NULL &&
6312 [ + - ]: 465235 : var->varattno >= rel->min_attr &&
6313 [ + - ]: 465235 : var->varattno <= rel->max_attr)
6314 : : {
6315 : 465235 : int ndx = var->varattno - rel->min_attr;
6316 : :
6317 [ + + ]: 465235 : if (rel->attr_widths[ndx] > 0)
6318 : 452131 : return rel->attr_widths[ndx];
6319 : : }
6320 : : }
6321 : :
6322 : : /*
6323 : : * No cached data available, so estimate using just the type info.
6324 : : */
6325 : 28018 : width = get_typavgwidth(var->vartype, var->vartypmod);
6326 [ - + ]: 28018 : Assert(width > 0);
6327 : :
6328 : 28018 : return width;
6329 : : }
6330 : :
6331 : 292811 : width = get_typavgwidth(exprType(expr), exprTypmod(expr));
6332 [ - + ]: 292811 : Assert(width > 0);
6333 : 292811 : return width;
6334 : : }
6335 : :
6336 : : /*
6337 : : * relation_byte_size
6338 : : * Estimate the storage space in bytes for a given number of tuples
6339 : : * of a given width (size in bytes).
6340 : : */
6341 : : static double
8862 tgl@sss.pgh.pa.us 6342 : 1599005 : relation_byte_size(double tuples, int width)
6343 : : {
3340 6344 : 1599005 : return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
6345 : : }
6346 : :
6347 : : /*
6348 : : * page_size
6349 : : * Returns an estimate of the number of pages covered by a given
6350 : : * number of tuples of a given width (size in bytes).
6351 : : */
6352 : : static double
8862 6353 : 4540 : page_size(double tuples, int width)
6354 : : {
6355 : 4540 : return ceil(relation_byte_size(tuples, width) / BLCKSZ);
6356 : : }
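Plugging in typical constants (assumptions about a stock 8-byte-aligned build: SizeofHeapTupleHeader = 23, MAXALIGN rounds up to a multiple of 8, BLCKSZ = 8192): a 28-byte tuple costs MAXALIGN(28) + MAXALIGN(23) = 32 + 24 = 56 bytes, so 100,000 such tuples span ceil(5600000 / 8192) = 684 pages:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed constants for a stock build; not read from the actual headers. */
#define BLCKSZ_ 8192
#define HEAP_HDR 23                 /* SizeofHeapTupleHeader, assumed */
#define MAXALIGN_(x) (((uintptr_t) (x) + 7) & ~(uintptr_t) 7)

static double
rel_bytes(double tuples, int width)
{
    return tuples * (MAXALIGN_(width) + MAXALIGN_(HEAP_HDR));
}

int
main(void)
{
    /* 100000 tuples of width 28 -> 56 bytes each -> 684 pages */
    printf("%.0f\n", ceil(rel_bytes(100000, 28) / BLCKSZ_));
    return 0;
}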
6357 : :
6358 : : /*
6359 : : * Estimate the fraction of the work that each worker will do given the
6360 : : * number of workers budgeted for the path.
6361 : : */
6362 : : static double
2648 rhaas@postgresql.org 6363 : 69808 : get_parallel_divisor(Path *path)
6364 : : {
6365 : 69808 : double parallel_divisor = path->parallel_workers;
6366 : :
6367 : : /*
6368 : : * Early experience with parallel query suggests that when there is only
6369 : : * one worker, the leader often makes a very substantial contribution to
6370 : : * executing the parallel portion of the plan, but as more workers are
6371 : : * added, it does less and less, because it's busy reading tuples from the
6372 : : * workers and doing whatever non-parallel post-processing is needed. By
6373 : : * the time we reach 4 workers, the leader no longer makes a meaningful
6374 : : * contribution. Thus, for now, estimate that the leader spends 30% of
6375 : : * its time servicing each worker, and the remainder executing the
6376 : : * parallel plan.
6377 : : */
2342 6378 [ + + ]: 69808 : if (parallel_leader_participation)
6379 : : {
6380 : : double leader_contribution;
6381 : :
6382 : 69424 : leader_contribution = 1.0 - (0.3 * path->parallel_workers);
6383 [ + + ]: 69424 : if (leader_contribution > 0)
6384 : 68935 : parallel_divisor += leader_contribution;
6385 : : }
6386 : :
2648 6387 : 69808 : return parallel_divisor;
6388 : : }
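With leader participation enabled, the 30%-per-worker rule gives: 1 worker -> divisor 1 + 0.7 = 1.7; 2 workers -> 2 + 0.4 = 2.4; 3 workers -> 3.1; and at 4 or more workers the leader contribution bottoms out at zero, leaving just the worker count. A standalone sketch:

/* Sketch of the leader-contribution rule above. */
static double
parallel_divisor_sketch(int workers, int leader_participates)
{
    double      divisor = workers;

    if (leader_participates)
    {
        double      leader_contribution = 1.0 - 0.3 * workers;

        if (leader_contribution > 0)
            divisor += leader_contribution;
    }
    return divisor;
}

/* parallel_divisor_sketch(1, 1) == 1.7; parallel_divisor_sketch(2, 1) == 2.4;
 * parallel_divisor_sketch(4, 1) == 4.0 -- leader contributes nothing */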
6389 : :
6390 : : /*
6391 : : * compute_bitmap_pages
6392 : : * Estimate number of pages fetched from heap in a bitmap heap scan.
6393 : : *
6394 : : * 'baserel' is the relation to be scanned
6395 : : * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
6396 : : * 'loop_count' is the number of repetitions of the indexscan to factor into
6397 : : * estimates of caching behavior
6398 : : *
6399 : : * If cost_p isn't NULL, the indexTotalCost estimate is returned in *cost_p.
6400 : : * If tuples_p isn't NULL, the tuples_fetched estimate is returned in *tuples_p.
6401 : : */
6402 : : double
118 tgl@sss.pgh.pa.us 6403 :GNC 277494 : compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel,
6404 : : Path *bitmapqual, double loop_count,
6405 : : Cost *cost_p, double *tuples_p)
6406 : : {
6407 : : Cost indexTotalCost;
6408 : : Selectivity indexSelectivity;
6409 : : double T;
6410 : : double pages_fetched;
6411 : : double tuples_fetched;
6412 : : double heap_pages;
6413 : : long maxentries;
6414 : :
6415 : : /*
6416 : : * Fetch total cost of obtaining the bitmap, as well as its total
6417 : : * selectivity.
6418 : : */
2634 rhaas@postgresql.org 6419 :CBC 277494 : cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
6420 : :
6421 : : /*
6422 : : * Estimate number of main-table pages fetched.
6423 : : */
6424 : 277494 : tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);
6425 : :
6426 [ + + ]: 277494 : T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;
6427 : :
6428 : : /*
6429 : : * For a single scan, the number of heap pages that need to be fetched is
6430 : : * the same as the Mackert and Lohman formula for the case T <= b (ie, no
6431 : : * re-reads needed).
6432 : : */
2347 6433 : 277494 : pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
6434 : :
6435 : : /*
6436 : : * Calculate the number of pages fetched from the heap. Then based on
6437 : : * current work_mem estimate get the estimated maxentries in the bitmap.
6438 : : * (Note that we always do this calculation based on the number of pages
6439 : : * that would be fetched in a single iteration, even if loop_count > 1.
6440 : : * That's correct, because only that number of entries will be stored in
6441 : : * the bitmap at one time.)
6442 : : */
6443 [ + + ]: 277494 : heap_pages = Min(pages_fetched, baserel->pages);
6444 : 277494 : maxentries = tbm_calculate_entries(work_mem * 1024L);
6445 : :
2634 6446 [ + + ]: 277494 : if (loop_count > 1)
6447 : : {
6448 : : /*
6449 : : * For repeated bitmap scans, scale up the number of tuples fetched in
6450 : : * the Mackert and Lohman formula by the number of scans, so that we
6451 : : * estimate the number of pages fetched by all the scans. Then
6452 : : * pro-rate for one scan.
6453 : : */
6454 : 51253 : pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
6455 : : baserel->pages,
6456 : : get_indexpath_pages(bitmapqual),
6457 : : root);
6458 : 51253 : pages_fetched /= loop_count;
6459 : : }
6460 : :
6461 [ + + ]: 277494 : if (pages_fetched >= T)
6462 : 25266 : pages_fetched = T;
6463 : : else
6464 : 252228 : pages_fetched = ceil(pages_fetched);
6465 : :
2347 6466 [ + + ]: 277494 : if (maxentries < heap_pages)
6467 : : {
6468 : : double exact_pages;
6469 : : double lossy_pages;
6470 : :
6471 : : /*
6472 : : * Crude approximation of the number of lossy pages. Because of the
6473 : : * way tbm_lossify() is coded, the number of lossy pages increases
6474 : : * very sharply as soon as we run short of memory; this formula has
6475 : : * that property and seems to perform adequately in testing, but it's
6476 : : * possible we could do better somehow.
6477 : : */
6478 [ - + ]: 9 : lossy_pages = Max(0, heap_pages - maxentries / 2);
6479 : 9 : exact_pages = heap_pages - lossy_pages;
6480 : :
6481 : : /*
6482 : : * If there are lossy pages then recompute the number of tuples
6483 : : * processed by the bitmap heap node. We assume here that the chance
6484 : : * of a given tuple coming from an exact page is the same as the
6485 : : * chance that a given page is exact. This might not be true, but
6486 : : * it's not clear how we can do any better.
6487 : : */
6488 [ + - ]: 9 : if (lossy_pages > 0)
6489 : : tuples_fetched =
6490 : 9 : clamp_row_est(indexSelectivity *
6491 : 9 : (exact_pages / heap_pages) * baserel->tuples +
6492 : 9 : (lossy_pages / heap_pages) * baserel->tuples);
6493 : : }
6494 : :
118 tgl@sss.pgh.pa.us 6495 [ + + ]:GNC 277494 : if (cost_p)
6496 : 214411 : *cost_p = indexTotalCost;
6497 [ + + ]: 277494 : if (tuples_p)
6498 : 214411 : *tuples_p = tuples_fetched;
6499 : :
2634 rhaas@postgresql.org 6500 :CBC 277494 : return pages_fetched;
6501 : : }
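The single-scan estimate above is the Mackert-Lohman "no re-read" case, pages = 2TN / (2T + N). For example (numbers invented), T = 1000 heap pages and N = 500 fetched tuples give 2*1000*500 / (2*1000 + 500) = 400 pages, which is then capped at T and rounded up; a sketch:

#include <math.h>

/* Sketch of the single-scan Mackert-Lohman estimate used above. */
static double
bitmap_heap_pages(double T /* rel pages */, double N /* tuples fetched */)
{
    double      pages = (2.0 * T * N) / (2.0 * T + N);

    return (pages >= T) ? T : ceil(pages);
}

/* bitmap_heap_pages(1000.0, 500.0) == 400.0 */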